0a02ad93 1#include "builtin.h"
b1ffe8f3 2#include "perf.h"
3
4#include "util/util.h"
5#include "util/cache.h"
6#include "util/symbol.h"
7#include "util/thread.h"
8#include "util/header.h"
94c744b6 9#include "util/session.h"
10
11#include "util/parse-options.h"
b1ffe8f3 12#include "util/trace-event.h"
0a02ad93 13
14#include "util/debug.h"
15
b1ffe8f3 16#include <sys/prctl.h>
0a02ad93 17
18#include <semaphore.h>
19#include <pthread.h>
20#include <math.h>
419ab0d6 21
ec156764 22static char const *input_name = "perf.data";
0a02ad93 23
daa1d7a5 24static char default_sort_order[] = "avg, max, switch, runtime";
edb7c60e 25static const char *sort_order = default_sort_order;
daa1d7a5 26
27static int profile_cpu = -1;
28
29#define PR_SET_NAME 15 /* Set process name */
30#define MAX_CPUS 4096
0a02ad93 31
32static u64 run_measurement_overhead;
33static u64 sleep_measurement_overhead;
ec156764 34
35#define COMM_LEN 20
36#define SYM_LEN 129
ec156764 37
b1ffe8f3 38#define MAX_PID 65536
ec156764 39
b1ffe8f3 40static unsigned long nr_tasks;
ec156764 41
39aeb52f 42struct sched_atom;
ec156764 43
44struct task_desc {
45 unsigned long nr;
46 unsigned long pid;
47 char comm[COMM_LEN];
ec156764 48
49 unsigned long nr_events;
50 unsigned long curr_event;
39aeb52f 51 struct sched_atom **atoms;
52
53 pthread_t thread;
54 sem_t sleep_sem;
ec156764 55
56 sem_t ready_for_work;
57 sem_t work_done_sem;
58
59 u64 cpu_usage;
60};
61
62enum sched_event_type {
63 SCHED_EVENT_RUN,
64 SCHED_EVENT_SLEEP,
65 SCHED_EVENT_WAKEUP,
55ffb7a6 66 SCHED_EVENT_MIGRATION,
67};
68
39aeb52f 69struct sched_atom {
b1ffe8f3 70 enum sched_event_type type;
eed05fe7 71 int specific_wait;
72 u64 timestamp;
73 u64 duration;
74 unsigned long nr;
75 sem_t *wait_sem;
76 struct task_desc *wakee;
77};
78
79static struct task_desc *pid_to_task[MAX_PID];
80
81static struct task_desc **tasks;
82
83static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
84static u64 start_time;
85
86static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
ec156764 87
88static unsigned long nr_run_events;
89static unsigned long nr_sleep_events;
90static unsigned long nr_wakeup_events;
91
92static unsigned long nr_sleep_corrections;
93static unsigned long nr_run_events_optimized;
94
95static unsigned long targetless_wakeups;
96static unsigned long multitarget_wakeups;
97
98static u64 cpu_usage;
99static u64 runavg_cpu_usage;
100static u64 parent_cpu_usage;
101static u64 runavg_parent_cpu_usage;
102
103static unsigned long nr_runs;
104static u64 sum_runtime;
105static u64 sum_fluct;
106static u64 run_avg;
107
1967936d 108static unsigned int replay_repeat = 10;
ea57c4f5 109static unsigned long nr_timestamps;
110static unsigned long nr_unordered_timestamps;
111static unsigned long nr_state_machine_bugs;
c8a37751 112static unsigned long nr_context_switch_bugs;
113static unsigned long nr_events;
114static unsigned long nr_lost_chunks;
115static unsigned long nr_lost_events;
116
117#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
118
119enum thread_state {
120 THREAD_SLEEPING = 0,
121 THREAD_WAIT_CPU,
122 THREAD_SCHED_IN,
123 THREAD_IGNORE
124};
125
126struct work_atom {
127 struct list_head list;
128 enum thread_state state;
aa1ab9d2 129 u64 sched_out_time;
130 u64 wake_up_time;
131 u64 sched_in_time;
132 u64 runtime;
133};
134
39aeb52f 135struct work_atoms {
136 struct list_head work_list;
137 struct thread *thread;
138 struct rb_node node;
139 u64 max_lat;
3786310a 140 u64 max_lat_at;
141 u64 total_lat;
142 u64 nb_atoms;
143 u64 total_runtime;
144};
145
39aeb52f 146typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
147
148static struct rb_root atom_root, sorted_atom_root;
149
150static u64 all_runtime;
151static u64 all_count;
152
153
154static u64 get_nsecs(void)
155{
156 struct timespec ts;
157
158 clock_gettime(CLOCK_MONOTONIC, &ts);
159
160 return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
161}
162
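/*
 * burn_nsecs() busy-loops for the requested duration; the loop stops
 * run_measurement_overhead early to compensate for the cost of the
 * get_nsecs() calls themselves (measured by the calibration helpers
 * below).
 */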
b1ffe8f3 163static void burn_nsecs(u64 nsecs)
ec156764 164{
b1ffe8f3 165 u64 T0 = get_nsecs(), T1;
166
167 do {
168 T1 = get_nsecs();
169 } while (T1 + run_measurement_overhead < T0 + nsecs);
170}
171
b1ffe8f3 172static void sleep_nsecs(u64 nsecs)
173{
174 struct timespec ts;
175
176 ts.tv_nsec = nsecs % 1000000000ULL;
177 ts.tv_sec = nsecs / 1000000000ULL;
178
179 nanosleep(&ts, NULL);
180}
181
182static void calibrate_run_measurement_overhead(void)
183{
b1ffe8f3 184 u64 T0, T1, delta, min_delta = 1000000000ULL;
185 int i;
186
187 for (i = 0; i < 10; i++) {
188 T0 = get_nsecs();
189 burn_nsecs(0);
190 T1 = get_nsecs();
191 delta = T1-T0;
192 min_delta = min(min_delta, delta);
193 }
194 run_measurement_overhead = min_delta;
195
ad236fd2 196 printf("run measurement overhead: %Ld nsecs\n", min_delta);
197}
198
199static void calibrate_sleep_measurement_overhead(void)
200{
b1ffe8f3 201 u64 T0, T1, delta, min_delta = 1000000000ULL;
202 int i;
203
204 for (i = 0; i < 10; i++) {
205 T0 = get_nsecs();
206 sleep_nsecs(10000);
207 T1 = get_nsecs();
208 delta = T1-T0;
209 min_delta = min(min_delta, delta);
210 }
211 min_delta -= 10000;
212 sleep_measurement_overhead = min_delta;
213
ad236fd2 214 printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
215}
216
39aeb52f 217static struct sched_atom *
b1ffe8f3 218get_new_event(struct task_desc *task, u64 timestamp)
ec156764 219{
36479484 220 struct sched_atom *event = zalloc(sizeof(*event));
221 unsigned long idx = task->nr_events;
222 size_t size;
223
224 event->timestamp = timestamp;
225 event->nr = idx;
226
227 task->nr_events++;
39aeb52f 228 size = sizeof(struct sched_atom *) * task->nr_events;
229 task->atoms = realloc(task->atoms, size);
230 BUG_ON(!task->atoms);
ec156764 231
39aeb52f 232 task->atoms[idx] = event;
233
234 return event;
235}
236
39aeb52f 237static struct sched_atom *last_event(struct task_desc *task)
238{
239 if (!task->nr_events)
240 return NULL;
241
39aeb52f 242 return task->atoms[task->nr_events - 1];
243}
244
245static void
b1ffe8f3 246add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
ec156764 247{
39aeb52f 248 struct sched_atom *event, *curr_event = last_event(task);
249
250 /*
251 * optimize an existing RUN event by merging this one
252 * to it:
253 */
254 if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
255 nr_run_events_optimized++;
256 curr_event->duration += duration;
257 return;
258 }
259
260 event = get_new_event(task, timestamp);
261
262 event->type = SCHED_EVENT_RUN;
263 event->duration = duration;
264
265 nr_run_events++;
266}
267
ec156764 268static void
b1ffe8f3 269add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
270 struct task_desc *wakee)
271{
39aeb52f 272 struct sched_atom *event, *wakee_event;
273
274 event = get_new_event(task, timestamp);
275 event->type = SCHED_EVENT_WAKEUP;
276 event->wakee = wakee;
277
278 wakee_event = last_event(wakee);
279 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
280 targetless_wakeups++;
281 return;
282 }
283 if (wakee_event->wait_sem) {
284 multitarget_wakeups++;
285 return;
286 }
287
36479484 288 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
289 sem_init(wakee_event->wait_sem, 0, 0);
290 wakee_event->specific_wait = 1;
291 event->wait_sem = wakee_event->wait_sem;
292
293 nr_wakeup_events++;
294}
295
296static void
b1ffe8f3 297add_sched_event_sleep(struct task_desc *task, u64 timestamp,
ad236fd2 298 u64 task_state __used)
ec156764 299{
39aeb52f 300 struct sched_atom *event = get_new_event(task, timestamp);
301
302 event->type = SCHED_EVENT_SLEEP;
303
304 nr_sleep_events++;
305}
306
307static struct task_desc *register_pid(unsigned long pid, const char *comm)
308{
309 struct task_desc *task;
310
311 BUG_ON(pid >= MAX_PID);
312
313 task = pid_to_task[pid];
314
315 if (task)
316 return task;
317
36479484 318 task = zalloc(sizeof(*task));
319 task->pid = pid;
320 task->nr = nr_tasks;
321 strcpy(task->comm, comm);
322 /*
323 * every task starts in sleeping state - this gets ignored
324 * if there's no wakeup pointing to this sleep state:
325 */
326 add_sched_event_sleep(task, 0, 0);
327
328 pid_to_task[pid] = task;
329 nr_tasks++;
330 tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
331 BUG_ON(!tasks);
332 tasks[task->nr] = task;
333
334 if (verbose)
335 printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);
336
337 return task;
338}
339
340
341static void print_task_traces(void)
342{
343 struct task_desc *task;
344 unsigned long i;
345
346 for (i = 0; i < nr_tasks; i++) {
347 task = tasks[i];
ad236fd2 348 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
349 task->nr, task->comm, task->pid, task->nr_events);
350 }
351}
352
353static void add_cross_task_wakeups(void)
354{
355 struct task_desc *task1, *task2;
356 unsigned long i, j;
357
358 for (i = 0; i < nr_tasks; i++) {
359 task1 = tasks[i];
360 j = i + 1;
361 if (j == nr_tasks)
362 j = 0;
363 task2 = tasks[j];
364 add_sched_event_wakeup(task1, 0, task2);
365 }
366}
367
368static void
39aeb52f 369process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
370{
371 int ret = 0;
b1ffe8f3 372 u64 now;
373 long long delta;
374
375 now = get_nsecs();
39aeb52f 376 delta = start_time + atom->timestamp - now;
ec156764 377
39aeb52f 378 switch (atom->type) {
ec156764 379 case SCHED_EVENT_RUN:
39aeb52f 380 burn_nsecs(atom->duration);
381 break;
382 case SCHED_EVENT_SLEEP:
39aeb52f 383 if (atom->wait_sem)
384 ret = sem_wait(atom->wait_sem);
385 BUG_ON(ret);
386 break;
387 case SCHED_EVENT_WAKEUP:
39aeb52f 388 if (atom->wait_sem)
389 ret = sem_post(atom->wait_sem);
390 BUG_ON(ret);
391 break;
392 case SCHED_EVENT_MIGRATION:
393 break;
394 default:
395 BUG_ON(1);
396 }
397}
398
b1ffe8f3 399static u64 get_cpu_usage_nsec_parent(void)
400{
401 struct rusage ru;
b1ffe8f3 402 u64 sum;
403 int err;
404
405 err = getrusage(RUSAGE_SELF, &ru);
406 BUG_ON(err);
407
408 sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
409 sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
410
411 return sum;
412}
413
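/*
 * Each replay thread opens a software PERF_COUNT_SW_TASK_CLOCK
 * counter on itself and reads it around its event loop, which yields
 * the thread's own CPU time in nanoseconds instead of relying on the
 * parent's getrusage() based estimate above.
 */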
c0c9e721 414static int self_open_counters(void)
ec156764 415{
416 struct perf_event_attr attr;
417 int fd;
ec156764 418
c0c9e721 419 memset(&attr, 0, sizeof(attr));
ec156764 420
421 attr.type = PERF_TYPE_SOFTWARE;
422 attr.config = PERF_COUNT_SW_TASK_CLOCK;
ec156764 423
424 fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
425
426 if (fd < 0)
427 die("Error: sys_perf_event_open() syscall returned "
428 "with %d (%s)\n", fd, strerror(errno));
429 return fd;
430}
431
432static u64 get_cpu_usage_nsec_self(int fd)
433{
434 u64 runtime;
435 int ret;
436
437 ret = read(fd, &runtime, sizeof(runtime));
438 BUG_ON(ret != sizeof(runtime));
439
440 return runtime;
441}
442
443static void *thread_func(void *ctx)
444{
445 struct task_desc *this_task = ctx;
b1ffe8f3 446 u64 cpu_usage_0, cpu_usage_1;
447 unsigned long i, ret;
448 char comm2[22];
c0c9e721 449 int fd;
ec156764 450
451 sprintf(comm2, ":%s", this_task->comm);
452 prctl(PR_SET_NAME, comm2);
c0c9e721 453 fd = self_open_counters();
454
455again:
456 ret = sem_post(&this_task->ready_for_work);
457 BUG_ON(ret);
458 ret = pthread_mutex_lock(&start_work_mutex);
459 BUG_ON(ret);
460 ret = pthread_mutex_unlock(&start_work_mutex);
461 BUG_ON(ret);
ec156764 462
c0c9e721 463 cpu_usage_0 = get_cpu_usage_nsec_self(fd);
464
465 for (i = 0; i < this_task->nr_events; i++) {
466 this_task->curr_event = i;
39aeb52f 467 process_sched_event(this_task, this_task->atoms[i]);
468 }
469
c0c9e721 470 cpu_usage_1 = get_cpu_usage_nsec_self(fd);
ec156764 471 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
472 ret = sem_post(&this_task->work_done_sem);
473 BUG_ON(ret);
474
475 ret = pthread_mutex_lock(&work_done_wait_mutex);
476 BUG_ON(ret);
477 ret = pthread_mutex_unlock(&work_done_wait_mutex);
478 BUG_ON(ret);
479
480 goto again;
481}
482
483static void create_tasks(void)
484{
485 struct task_desc *task;
486 pthread_attr_t attr;
487 unsigned long i;
488 int err;
489
490 err = pthread_attr_init(&attr);
491 BUG_ON(err);
492 err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
493 BUG_ON(err);
494 err = pthread_mutex_lock(&start_work_mutex);
495 BUG_ON(err);
496 err = pthread_mutex_lock(&work_done_wait_mutex);
497 BUG_ON(err);
498 for (i = 0; i < nr_tasks; i++) {
499 task = tasks[i];
500 sem_init(&task->sleep_sem, 0, 0);
501 sem_init(&task->ready_for_work, 0, 0);
502 sem_init(&task->work_done_sem, 0, 0);
503 task->curr_event = 0;
504 err = pthread_create(&task->thread, &attr, thread_func, task);
505 BUG_ON(err);
506 }
507}
508
509static void wait_for_tasks(void)
510{
b1ffe8f3 511 u64 cpu_usage_0, cpu_usage_1;
512 struct task_desc *task;
513 unsigned long i, ret;
514
ec156764 515 start_time = get_nsecs();
516 cpu_usage = 0;
517 pthread_mutex_unlock(&work_done_wait_mutex);
518
519 for (i = 0; i < nr_tasks; i++) {
520 task = tasks[i];
521 ret = sem_wait(&task->ready_for_work);
522 BUG_ON(ret);
523 sem_init(&task->ready_for_work, 0, 0);
524 }
525 ret = pthread_mutex_lock(&work_done_wait_mutex);
526 BUG_ON(ret);
527
528 cpu_usage_0 = get_cpu_usage_nsec_parent();
529
530 pthread_mutex_unlock(&start_work_mutex);
531
532 for (i = 0; i < nr_tasks; i++) {
533 task = tasks[i];
534 ret = sem_wait(&task->work_done_sem);
535 BUG_ON(ret);
536 sem_init(&task->work_done_sem, 0, 0);
537 cpu_usage += task->cpu_usage;
538 task->cpu_usage = 0;
539 }
540
541 cpu_usage_1 = get_cpu_usage_nsec_parent();
542 if (!runavg_cpu_usage)
543 runavg_cpu_usage = cpu_usage;
544 runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;
545
546 parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
547 if (!runavg_parent_cpu_usage)
548 runavg_parent_cpu_usage = parent_cpu_usage;
549 runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
550 parent_cpu_usage)/10;
551
552 ret = pthread_mutex_lock(&start_work_mutex);
553 BUG_ON(ret);
554
555 for (i = 0; i < nr_tasks; i++) {
556 task = tasks[i];
557 sem_init(&task->sleep_sem, 0, 0);
558 task->curr_event = 0;
559 }
560}
561
562static void run_one_test(void)
563{
b1ffe8f3 564 u64 T0, T1, delta, avg_delta, fluct, std_dev;
565
566 T0 = get_nsecs();
567 wait_for_tasks();
568 T1 = get_nsecs();
569
570 delta = T1 - T0;
571 sum_runtime += delta;
572 nr_runs++;
573
574 avg_delta = sum_runtime / nr_runs;
575 if (delta < avg_delta)
576 fluct = avg_delta - delta;
577 else
578 fluct = delta - avg_delta;
579 sum_fluct += fluct;
580 std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
581 if (!run_avg)
582 run_avg = delta;
583 run_avg = (run_avg*9 + delta)/10;
584
ad236fd2 585 printf("#%-3ld: %0.3f, ",
586 nr_runs, (double)delta/1000000.0);
587
ad236fd2 588 printf("ravg: %0.2f, ",
589 (double)run_avg/1e6);
590
ad236fd2 591 printf("cpu: %0.2f / %0.2f",
592 (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);
593
594#if 0
595 /*
596 * rusage statistics done by the parent, these are less
597 * accurate than the sum_exec_runtime based statistics:
598 */
ad236fd2 599 printf(" [%0.2f / %0.2f]",
600 (double)parent_cpu_usage/1e6,
601 (double)runavg_parent_cpu_usage/1e6);
602#endif
603
ad236fd2 604 printf("\n");
605
606 if (nr_sleep_corrections)
ad236fd2 607 printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
608 nr_sleep_corrections = 0;
609}
610
611static void test_calibrations(void)
612{
b1ffe8f3 613 u64 T0, T1;
ec156764
IM
614
615 T0 = get_nsecs();
616 burn_nsecs(1e6);
617 T1 = get_nsecs();
618
ad236fd2 619 printf("the run test took %Ld nsecs\n", T1-T0);
ec156764
IM
620
621 T0 = get_nsecs();
622 sleep_nsecs(1e6);
623 T1 = get_nsecs();
624
ad236fd2 625 printf("the sleep test took %Ld nsecs\n", T1-T0);
626}
627
628#define FILL_FIELD(ptr, field, event, data) \
629 ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
630
631#define FILL_ARRAY(ptr, array, event, data) \
632do { \
633 void *__array = raw_field_ptr(event, #array, data); \
634 memcpy(ptr.array, __array, sizeof(ptr.array)); \
635} while(0)
636
637#define FILL_COMMON_FIELDS(ptr, event, data) \
638do { \
639 FILL_FIELD(ptr, common_type, event, data); \
640 FILL_FIELD(ptr, common_flags, event, data); \
641 FILL_FIELD(ptr, common_preempt_count, event, data); \
642 FILL_FIELD(ptr, common_pid, event, data); \
643 FILL_FIELD(ptr, common_tgid, event, data); \
644} while (0)
645
646
647
648struct trace_switch_event {
649 u32 size;
650
651 u16 common_type;
652 u8 common_flags;
653 u8 common_preempt_count;
654 u32 common_pid;
655 u32 common_tgid;
656
657 char prev_comm[16];
658 u32 prev_pid;
659 u32 prev_prio;
660 u64 prev_state;
661 char next_comm[16];
662 u32 next_pid;
663 u32 next_prio;
664};
665
39aeb52f 666struct trace_runtime_event {
667 u32 size;
668
669 u16 common_type;
670 u8 common_flags;
671 u8 common_preempt_count;
672 u32 common_pid;
673 u32 common_tgid;
674
675 char comm[16];
676 u32 pid;
677 u64 runtime;
678 u64 vruntime;
679};
419ab0d6 680
681struct trace_wakeup_event {
682 u32 size;
683
684 u16 common_type;
685 u8 common_flags;
686 u8 common_preempt_count;
687 u32 common_pid;
688 u32 common_tgid;
689
690 char comm[16];
691 u32 pid;
692
693 u32 prio;
694 u32 success;
695 u32 cpu;
696};
697
698struct trace_fork_event {
699 u32 size;
46538818 700
701 u16 common_type;
702 u8 common_flags;
703 u8 common_preempt_count;
704 u32 common_pid;
705 u32 common_tgid;
706
707 char parent_comm[16];
708 u32 parent_pid;
709 char child_comm[16];
710 u32 child_pid;
711};
712
713struct trace_migrate_task_event {
714 u32 size;
715
716 u16 common_type;
717 u8 common_flags;
718 u8 common_preempt_count;
719 u32 common_pid;
720 u32 common_tgid;
721
722 char comm[16];
723 u32 pid;
724
725 u32 prio;
726 u32 cpu;
727};
728
729struct trace_sched_handler {
730 void (*switch_event)(struct trace_switch_event *,
b3165f41 731 struct perf_session *,
732 struct event *,
733 int cpu,
734 u64 timestamp,
735 struct thread *thread);
736
39aeb52f 737 void (*runtime_event)(struct trace_runtime_event *,
b3165f41 738 struct perf_session *,
39aeb52f 739 struct event *,
740 int cpu,
741 u64 timestamp,
742 struct thread *thread);
743
419ab0d6 744 void (*wakeup_event)(struct trace_wakeup_event *,
b3165f41 745 struct perf_session *,
746 struct event *,
747 int cpu,
748 u64 timestamp,
749 struct thread *thread);
750
751 void (*fork_event)(struct trace_fork_event *,
752 struct event *,
753 int cpu,
754 u64 timestamp,
755 struct thread *thread);
756
757 void (*migrate_task_event)(struct trace_migrate_task_event *,
b3165f41 758 struct perf_session *session,
759 struct event *,
760 int cpu,
761 u64 timestamp,
762 struct thread *thread);
419ab0d6 763};
46538818 764
46538818 765
766static void
767replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
b3165f41 768 struct perf_session *session __used,
769 struct event *event,
770 int cpu __used,
771 u64 timestamp __used,
772 struct thread *thread __used)
773{
774 struct task_desc *waker, *wakee;
fbf94829 775
776 if (verbose) {
777 printf("sched_wakeup event %p\n", event);
fbf94829 778
ad236fd2 779 printf(" ... pid %d woke up %s/%d\n",
780 wakeup_event->common_pid,
781 wakeup_event->comm,
782 wakeup_event->pid);
ad236fd2 783 }
fbf94829 784
785 waker = register_pid(wakeup_event->common_pid, "<unknown>");
786 wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
787
788 add_sched_event_wakeup(waker, timestamp, wakee);
789}
790
d1153389 791static u64 cpu_last_switched[MAX_CPUS];
792
793static void
419ab0d6 794replay_switch_event(struct trace_switch_event *switch_event,
b3165f41 795 struct perf_session *session __used,
796 struct event *event,
797 int cpu,
798 u64 timestamp,
799 struct thread *thread __used)
ec156764 800{
801 struct task_desc *prev, *next;
802 u64 timestamp0;
803 s64 delta;
804
805 if (verbose)
806 printf("sched_switch event %p\n", event);
807
808 if (cpu >= MAX_CPUS || cpu < 0)
809 return;
810
811 timestamp0 = cpu_last_switched[cpu];
812 if (timestamp0)
813 delta = timestamp - timestamp0;
814 else
815 delta = 0;
816
817 if (delta < 0)
818 die("hm, delta: %Ld < 0 ?\n", delta);
819
820 if (verbose) {
821 printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
822 switch_event->prev_comm, switch_event->prev_pid,
823 switch_event->next_comm, switch_event->next_pid,
824 delta);
825 }
fbf94829 826
827 prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
828 next = register_pid(switch_event->next_pid, switch_event->next_comm);
829
830 cpu_last_switched[cpu] = timestamp;
831
832 add_sched_event_run(prev, timestamp, delta);
419ab0d6 833 add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
834}
835
fbf94829 836
837static void
838replay_fork_event(struct trace_fork_event *fork_event,
839 struct event *event,
840 int cpu __used,
841 u64 timestamp __used,
842 struct thread *thread __used)
843{
844 if (verbose) {
845 printf("sched_fork event %p\n", event);
846 printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
847 printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
848 }
849 register_pid(fork_event->parent_pid, fork_event->parent_comm);
850 register_pid(fork_event->child_pid, fork_event->child_comm);
851}
fbf94829 852
419ab0d6 853static struct trace_sched_handler replay_ops = {
854 .wakeup_event = replay_wakeup_event,
855 .switch_event = replay_switch_event,
856 .fork_event = replay_fork_event,
857};
858
859struct sort_dimension {
860 const char *name;
b5fae128 861 sort_fn_t cmp;
862 struct list_head list;
863};
864
865static LIST_HEAD(cmp_pid);
866
daa1d7a5 867static int
39aeb52f 868thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
869{
870 struct sort_dimension *sort;
871 int ret = 0;
872
873 BUG_ON(list_empty(list));
874
875 list_for_each_entry(sort, list, list) {
876 ret = sort->cmp(l, r);
877 if (ret)
878 return ret;
879 }
880
881 return ret;
882}
883
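/*
 * The per-thread work_atoms are kept in an rb-tree (atom_root) keyed
 * by the comparison list passed in - cmp_pid while events are being
 * collected - and are later re-inserted into sorted_atom_root using
 * the user-selected sort keys (see sort_lat()).
 */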
39aeb52f 884static struct work_atoms *
885thread_atoms_search(struct rb_root *root, struct thread *thread,
886 struct list_head *sort_list)
887{
888 struct rb_node *node = root->rb_node;
39aeb52f 889 struct work_atoms key = { .thread = thread };
890
891 while (node) {
39aeb52f 892 struct work_atoms *atoms;
893 int cmp;
894
39aeb52f 895 atoms = container_of(node, struct work_atoms, node);
896
897 cmp = thread_lat_cmp(sort_list, &key, atoms);
898 if (cmp > 0)
899 node = node->rb_left;
900 else if (cmp < 0)
901 node = node->rb_right;
902 else {
903 BUG_ON(thread != atoms->thread);
904 return atoms;
905 }
906 }
907 return NULL;
908}
909
cdce9d73 910static void
39aeb52f 911__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
daa1d7a5 912 struct list_head *sort_list)
913{
914 struct rb_node **new = &(root->rb_node), *parent = NULL;
915
916 while (*new) {
39aeb52f 917 struct work_atoms *this;
daa1d7a5 918 int cmp;
cdce9d73 919
39aeb52f 920 this = container_of(*new, struct work_atoms, node);
cdce9d73 921 parent = *new;
922
923 cmp = thread_lat_cmp(sort_list, data, this);
924
925 if (cmp > 0)
cdce9d73 926 new = &((*new)->rb_left);
cdce9d73 927 else
daa1d7a5 928 new = &((*new)->rb_right);
929 }
930
931 rb_link_node(&data->node, parent, new);
932 rb_insert_color(&data->node, root);
933}
934
b1ffe8f3 935static void thread_atoms_insert(struct thread *thread)
cdce9d73 936{
36479484 937 struct work_atoms *atoms = zalloc(sizeof(*atoms));
17562205 938 if (!atoms)
939 die("No memory");
940
17562205 941 atoms->thread = thread;
39aeb52f 942 INIT_LIST_HEAD(&atoms->work_list);
b1ffe8f3 943 __thread_latency_insert(&atom_root, atoms, &cmp_pid);
944}
945
946static void
947latency_fork_event(struct trace_fork_event *fork_event __used,
948 struct event *event __used,
949 int cpu __used,
950 u64 timestamp __used,
951 struct thread *thread __used)
952{
953 /* should insert the newcomer */
954}
955
ea92ed5a 956__used
957static char sched_out_state(struct trace_switch_event *switch_event)
958{
959 const char *str = TASK_STATE_TO_CHAR_STR;
960
961 return str[switch_event->prev_state];
962}
963
964static void
39aeb52f 965add_sched_out_event(struct work_atoms *atoms,
966 char run_state,
967 u64 timestamp)
cdce9d73 968{
36479484 969 struct work_atom *atom = zalloc(sizeof(*atom));
b1ffe8f3 970 if (!atom)
971 die("No memory");
972
973 atom->sched_out_time = timestamp;
974
39aeb52f 975 if (run_state == 'R') {
b1ffe8f3 976 atom->state = THREAD_WAIT_CPU;
aa1ab9d2 977 atom->wake_up_time = atom->sched_out_time;
978 }
979
39aeb52f 980 list_add_tail(&atom->list, &atoms->work_list);
981}
982
983static void
39aeb52f 984add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
985{
986 struct work_atom *atom;
987
988 BUG_ON(list_empty(&atoms->work_list));
989
990 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
991
992 atom->runtime += delta;
993 atoms->total_runtime += delta;
994}
995
996static void
997add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
cdce9d73 998{
b1ffe8f3 999 struct work_atom *atom;
66685678 1000 u64 delta;
cdce9d73 1001
39aeb52f 1002 if (list_empty(&atoms->work_list))
1003 return;
1004
39aeb52f 1005 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
cdce9d73 1006
b1ffe8f3 1007 if (atom->state != THREAD_WAIT_CPU)
1008 return;
1009
1010 if (timestamp < atom->wake_up_time) {
1011 atom->state = THREAD_IGNORE;
1012 return;
1013 }
1014
1015 atom->state = THREAD_SCHED_IN;
1016 atom->sched_in_time = timestamp;
66685678 1017
b1ffe8f3 1018 delta = atom->sched_in_time - atom->wake_up_time;
66685678 1019 atoms->total_lat += delta;
3786310a 1020 if (delta > atoms->max_lat) {
66685678 1021 atoms->max_lat = delta;
1022 atoms->max_lat_at = timestamp;
1023 }
66685678 1024 atoms->nb_atoms++;
1025}
1026
1027static void
1028latency_switch_event(struct trace_switch_event *switch_event,
b3165f41 1029 struct perf_session *session,
cdce9d73 1030 struct event *event __used,
ea92ed5a 1031 int cpu,
cdce9d73
FW
1032 u64 timestamp,
1033 struct thread *thread __used)
1034{
39aeb52f 1035 struct work_atoms *out_events, *in_events;
cdce9d73 1036 struct thread *sched_out, *sched_in;
1037 u64 timestamp0;
1038 s64 delta;
1039
39aeb52f 1040 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1041
1042 timestamp0 = cpu_last_switched[cpu];
1043 cpu_last_switched[cpu] = timestamp;
1044 if (timestamp0)
1045 delta = timestamp - timestamp0;
1046 else
1047 delta = 0;
1048
1049 if (delta < 0)
1050 die("hm, delta: %Ld < 0 ?\n", delta);
1051
cdce9d73 1052
1053 sched_out = perf_session__findnew(session, switch_event->prev_pid);
1054 sched_in = perf_session__findnew(session, switch_event->next_pid);
cdce9d73 1055
39aeb52f 1056 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1057 if (!out_events) {
1058 thread_atoms_insert(sched_out);
1059 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1060 if (!out_events)
1061 die("out-event: Internal tree error");
1062 }
1063 add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
1064
1065 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1066 if (!in_events) {
b1ffe8f3 1067 thread_atoms_insert(sched_in);
39aeb52f 1068 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1069 if (!in_events)
1070 die("in-event: Internal tree error");
1071 /*
1072 * Task came in that we have not heard about yet,
1073 * add an initial atom in runnable state:
1074 */
1075 add_sched_out_event(in_events, 'R', timestamp);
cdce9d73 1076 }
39aeb52f 1077 add_sched_in_event(in_events, timestamp);
1078}
cdce9d73 1079
39aeb52f 1080static void
1081latency_runtime_event(struct trace_runtime_event *runtime_event,
b3165f41 1082 struct perf_session *session,
39aeb52f 1083 struct event *event __used,
1084 int cpu,
1085 u64 timestamp,
1086 struct thread *this_thread __used)
1087{
b3165f41 1088 struct thread *thread = perf_session__findnew(session, runtime_event->pid);
d5b889f2 1089 struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
39aeb52f 1090
1091 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
39aeb52f 1092 if (!atoms) {
1093 thread_atoms_insert(thread);
1094 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1095 if (!atoms)
1096 die("in-event: Internal tree error");
1097 add_sched_out_event(atoms, 'R', timestamp);
1098 }
1099
39aeb52f 1100 add_runtime_event(atoms, runtime_event->runtime, timestamp);
1101}
1102
1103static void
1104latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
b3165f41 1105 struct perf_session *session,
39aeb52f 1106 struct event *__event __used,
1107 int cpu __used,
1108 u64 timestamp,
1109 struct thread *thread __used)
1110{
39aeb52f 1111 struct work_atoms *atoms;
b1ffe8f3 1112 struct work_atom *atom;
1113 struct thread *wakee;
1114
1115 /* Note for later, it may be interesting to observe the failing cases */
1116 if (!wakeup_event->success)
1117 return;
1118
b3165f41 1119 wakee = perf_session__findnew(session, wakeup_event->pid);
b5fae128 1120 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
17562205 1121 if (!atoms) {
b1ffe8f3 1122 thread_atoms_insert(wakee);
39aeb52f 1123 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1124 if (!atoms)
1125 die("wakeup-event: Internal tree error");
1126 add_sched_out_event(atoms, 'S', timestamp);
1127 }
1128
39aeb52f 1129 BUG_ON(list_empty(&atoms->work_list));
cdce9d73 1130
39aeb52f 1131 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
cdce9d73 1132
1133 /*
1134 * You WILL be missing events if you've recorded only
1135 * one CPU, or are only looking at one, so don't
1136 * make useless noise.
1137 */
1138 if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
dc02bf71 1139 nr_state_machine_bugs++;
cdce9d73 1140
1141 nr_timestamps++;
1142 if (atom->sched_out_time > timestamp) {
dc02bf71 1143 nr_unordered_timestamps++;
aa1ab9d2 1144 return;
ea57c4f5 1145 }
aa1ab9d2 1146
1147 atom->state = THREAD_WAIT_CPU;
1148 atom->wake_up_time = timestamp;
1149}
1150
1151static void
1152latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
b3165f41 1153 struct perf_session *session,
1154 struct event *__event __used,
1155 int cpu __used,
1156 u64 timestamp,
1157 struct thread *thread __used)
1158{
1159 struct work_atoms *atoms;
1160 struct work_atom *atom;
1161 struct thread *migrant;
1162
1163 /*
1164 * Only need to worry about migration when profiling one CPU.
1165 */
1166 if (profile_cpu == -1)
1167 return;
1168
b3165f41 1169 migrant = perf_session__findnew(session, migrate_task_event->pid);
1170 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
1171 if (!atoms) {
1172 thread_atoms_insert(migrant);
1173 register_pid(migrant->pid, migrant->comm);
1174 atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
1175 if (!atoms)
1176 die("migration-event: Internal tree error");
1177 add_sched_out_event(atoms, 'R', timestamp);
1178 }
1179
1180 BUG_ON(list_empty(&atoms->work_list));
1181
1182 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1183 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1184
1185 nr_timestamps++;
1186
1187 if (atom->sched_out_time > timestamp)
1188 nr_unordered_timestamps++;
1189}
1190
cdce9d73 1191static struct trace_sched_handler lat_ops = {
1192 .wakeup_event = latency_wakeup_event,
1193 .switch_event = latency_switch_event,
39aeb52f 1194 .runtime_event = latency_runtime_event,
ea92ed5a 1195 .fork_event = latency_fork_event,
55ffb7a6 1196 .migrate_task_event = latency_migrate_task_event,
1197};
1198
39aeb52f 1199static void output_lat_thread(struct work_atoms *work_list)
cdce9d73 1200{
1201 int i;
1202 int ret;
66685678 1203 u64 avg;
cdce9d73 1204
39aeb52f 1205 if (!work_list->nb_atoms)
cdce9d73 1206 return;
1207 /*
1208 * Ignore idle threads:
1209 */
80ed0987 1210 if (!strcmp(work_list->thread->comm, "swapper"))
ea57c4f5 1211 return;
cdce9d73 1212
39aeb52f 1213 all_runtime += work_list->total_runtime;
1214 all_count += work_list->nb_atoms;
66685678 1215
80ed0987 1216 ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);
cdce9d73 1217
08f69e6c 1218 for (i = 0; i < 24 - ret; i++)
1219 printf(" ");
1220
39aeb52f 1221 avg = work_list->total_lat / work_list->nb_atoms;
cdce9d73 1222
3786310a 1223 printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
39aeb52f 1224 (double)work_list->total_runtime / 1e6,
1225 work_list->nb_atoms, (double)avg / 1e6,
1226 (double)work_list->max_lat / 1e6,
1227 (double)work_list->max_lat_at / 1e9);
1228}
1229
39aeb52f 1230static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
daa1d7a5 1231{
1232 if (l->thread->pid < r->thread->pid)
1233 return -1;
1234 if (l->thread->pid > r->thread->pid)
1235 return 1;
1236
1237 return 0;
1238}
1239
1240static struct sort_dimension pid_sort_dimension = {
1241 .name = "pid",
1242 .cmp = pid_cmp,
1243};
1244
39aeb52f 1245static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1246{
1247 u64 avgl, avgr;
1248
1249 if (!l->nb_atoms)
1250 return -1;
1251
1252 if (!r->nb_atoms)
1253 return 1;
1254
1255 avgl = l->total_lat / l->nb_atoms;
1256 avgr = r->total_lat / r->nb_atoms;
1257
1258 if (avgl < avgr)
1259 return -1;
1260 if (avgl > avgr)
1261 return 1;
1262
1263 return 0;
1264}
1265
1266static struct sort_dimension avg_sort_dimension = {
1267 .name = "avg",
1268 .cmp = avg_cmp,
1269};
1270
39aeb52f 1271static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1272{
1273 if (l->max_lat < r->max_lat)
1274 return -1;
1275 if (l->max_lat > r->max_lat)
1276 return 1;
1277
1278 return 0;
1279}
1280
1281static struct sort_dimension max_sort_dimension = {
1282 .name = "max",
1283 .cmp = max_cmp,
1284};
1285
39aeb52f 1286static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1287{
1288 if (l->nb_atoms < r->nb_atoms)
1289 return -1;
1290 if (l->nb_atoms > r->nb_atoms)
1291 return 1;
1292
1293 return 0;
1294}
1295
1296static struct sort_dimension switch_sort_dimension = {
1297 .name = "switch",
1298 .cmp = switch_cmp,
1299};
1300
39aeb52f 1301static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1302{
1303 if (l->total_runtime < r->total_runtime)
1304 return -1;
1305 if (l->total_runtime > r->total_runtime)
1306 return 1;
1307
1308 return 0;
1309}
1310
1311static struct sort_dimension runtime_sort_dimension = {
1312 .name = "runtime",
1313 .cmp = runtime_cmp,
1314};
1315
1316static struct sort_dimension *available_sorts[] = {
1317 &pid_sort_dimension,
1318 &avg_sort_dimension,
1319 &max_sort_dimension,
1320 &switch_sort_dimension,
1321 &runtime_sort_dimension,
1322};
1323
1324#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
1325
1326static LIST_HEAD(sort_list);
1327
cbef79a8 1328static int sort_dimension__add(const char *tok, struct list_head *list)
1329{
1330 int i;
1331
1332 for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
1333 if (!strcmp(available_sorts[i]->name, tok)) {
1334 list_add_tail(&available_sorts[i]->list, list);
1335
1336 return 0;
1337 }
1338 }
1339
1340 return -1;
1341}
1342
1343static void setup_sorting(void);
1344
1345static void sort_lat(void)
1346{
1347 struct rb_node *node;
1348
1349 for (;;) {
39aeb52f 1350 struct work_atoms *data;
b1ffe8f3 1351 node = rb_first(&atom_root);
1352 if (!node)
1353 break;
1354
b1ffe8f3 1355 rb_erase(node, &atom_root);
39aeb52f 1356 data = rb_entry(node, struct work_atoms, node);
b1ffe8f3 1357 __thread_latency_insert(&sorted_atom_root, data, &sort_list);
1358 }
1359}
1360
1361static struct trace_sched_handler *trace_handler;
1362
fbf94829 1363static void
b3165f41 1364process_sched_wakeup_event(void *data, struct perf_session *session,
1365 struct event *event,
1366 int cpu __used,
1367 u64 timestamp __used,
1368 struct thread *thread __used)
1369{
1370 struct trace_wakeup_event wakeup_event;
1371
f48f669d 1372 FILL_COMMON_FIELDS(wakeup_event, event, data);
419ab0d6 1373
1374 FILL_ARRAY(wakeup_event, comm, event, data);
1375 FILL_FIELD(wakeup_event, pid, event, data);
1376 FILL_FIELD(wakeup_event, prio, event, data);
1377 FILL_FIELD(wakeup_event, success, event, data);
1378 FILL_FIELD(wakeup_event, cpu, event, data);
419ab0d6 1379
0ec04e16 1380 if (trace_handler->wakeup_event)
1381 trace_handler->wakeup_event(&wakeup_event, session, event,
1382 cpu, timestamp, thread);
1383}
1384
1385/*
1386 * Track the current task - that way we can know whether there are any
1387 * weird events, such as a task being switched away that is not current.
1388 */
40749d0f 1389static int max_cpu;
0ec04e16 1390
1391static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
1392
1393static struct thread *curr_thread[MAX_CPUS];
1394
1395static char next_shortname1 = 'A';
1396static char next_shortname2 = '0';
1397
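/*
 * 'perf sched map' output: each task is assigned a two-character
 * shortname the first time it is scheduled in, and every sched_switch
 * prints one column per CPU showing which task is currently running
 * there, with '*' marking the CPU that switched.
 */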
1398static void
1399map_switch_event(struct trace_switch_event *switch_event,
b3165f41 1400 struct perf_session *session,
1401 struct event *event __used,
1402 int this_cpu,
1403 u64 timestamp,
1404 struct thread *thread __used)
1405{
1406 struct thread *sched_out, *sched_in;
1407 int new_shortname;
1408 u64 timestamp0;
1409 s64 delta;
1410 int cpu;
1411
1412 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1413
1414 if (this_cpu > max_cpu)
1415 max_cpu = this_cpu;
1416
1417 timestamp0 = cpu_last_switched[this_cpu];
1418 cpu_last_switched[this_cpu] = timestamp;
1419 if (timestamp0)
1420 delta = timestamp - timestamp0;
1421 else
1422 delta = 0;
1423
1424 if (delta < 0)
1425 die("hm, delta: %Ld < 0 ?\n", delta);
1426
1427
1428 sched_out = perf_session__findnew(session, switch_event->prev_pid);
1429 sched_in = perf_session__findnew(session, switch_event->next_pid);
1430
1431 curr_thread[this_cpu] = sched_in;
1432
1433 printf(" ");
1434
1435 new_shortname = 0;
1436 if (!sched_in->shortname[0]) {
1437 sched_in->shortname[0] = next_shortname1;
1438 sched_in->shortname[1] = next_shortname2;
1439
1440 if (next_shortname1 < 'Z') {
1441 next_shortname1++;
1442 } else {
1443 next_shortname1='A';
1444 if (next_shortname2 < '9') {
1445 next_shortname2++;
1446 } else {
1447 next_shortname2='0';
1448 }
1449 }
1450 new_shortname = 1;
1451 }
1452
1453 for (cpu = 0; cpu <= max_cpu; cpu++) {
1454 if (cpu != this_cpu)
1455 printf(" ");
1456 else
1457 printf("*");
1458
1459 if (curr_thread[cpu]) {
1460 if (curr_thread[cpu]->pid)
1461 printf("%2s ", curr_thread[cpu]->shortname);
1462 else
1463 printf(". ");
1464 } else
1465 printf(" ");
1466 }
1467
1468 printf(" %12.6f secs ", (double)timestamp/1e9);
1469 if (new_shortname) {
1470 printf("%s => %s:%d\n",
1471 sched_in->shortname, sched_in->comm, sched_in->pid);
1472 } else {
1473 printf("\n");
1474 }
1475}
1476
1477
419ab0d6 1478static void
b3165f41 1479process_sched_switch_event(void *data, struct perf_session *session,
419ab0d6 1480 struct event *event,
0ec04e16 1481 int this_cpu,
1482 u64 timestamp __used,
1483 struct thread *thread __used)
1484{
1485 struct trace_switch_event switch_event;
1486
f48f669d 1487 FILL_COMMON_FIELDS(switch_event, event, data);
419ab0d6 1488
1489 FILL_ARRAY(switch_event, prev_comm, event, data);
1490 FILL_FIELD(switch_event, prev_pid, event, data);
1491 FILL_FIELD(switch_event, prev_prio, event, data);
1492 FILL_FIELD(switch_event, prev_state, event, data);
1493 FILL_ARRAY(switch_event, next_comm, event, data);
1494 FILL_FIELD(switch_event, next_pid, event, data);
1495 FILL_FIELD(switch_event, next_prio, event, data);
419ab0d6 1496
0ec04e16 1497 if (curr_pid[this_cpu] != (u32)-1) {
1498 /*
1499 * Are we trying to switch away a PID that is
1500 * not current?
1501 */
0ec04e16 1502 if (curr_pid[this_cpu] != switch_event.prev_pid)
1503 nr_context_switch_bugs++;
1504 }
0ec04e16 1505 if (trace_handler->switch_event)
1506 trace_handler->switch_event(&switch_event, session, event,
1507 this_cpu, timestamp, thread);
c8a37751 1508
0ec04e16 1509 curr_pid[this_cpu] = switch_event.next_pid;
1510}
1511
39aeb52f 1512static void
b3165f41 1513process_sched_runtime_event(void *data, struct perf_session *session,
39aeb52f 1514 struct event *event,
1515 int cpu __used,
1516 u64 timestamp __used,
1517 struct thread *thread __used)
1518{
1519 struct trace_runtime_event runtime_event;
1520
1521 FILL_ARRAY(runtime_event, comm, event, data);
1522 FILL_FIELD(runtime_event, pid, event, data);
1523 FILL_FIELD(runtime_event, runtime, event, data);
1524 FILL_FIELD(runtime_event, vruntime, event, data);
39aeb52f 1525
0ec04e16 1526 if (trace_handler->runtime_event)
b3165f41 1527 trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
39aeb52f 1528}
1529
419ab0d6 1530static void
f48f669d 1531process_sched_fork_event(void *data,
1532 struct event *event,
1533 int cpu __used,
1534 u64 timestamp __used,
1535 struct thread *thread __used)
fbf94829 1536{
1537 struct trace_fork_event fork_event;
1538
f48f669d 1539 FILL_COMMON_FIELDS(fork_event, event, data);
46538818 1540
1541 FILL_ARRAY(fork_event, parent_comm, event, data);
1542 FILL_FIELD(fork_event, parent_pid, event, data);
1543 FILL_ARRAY(fork_event, child_comm, event, data);
1544 FILL_FIELD(fork_event, child_pid, event, data);
46538818 1545
0ec04e16 1546 if (trace_handler->fork_event)
1547 trace_handler->fork_event(&fork_event, event,
1548 cpu, timestamp, thread);
1549}
1550
1551static void
1552process_sched_exit_event(struct event *event,
1553 int cpu __used,
1554 u64 timestamp __used,
1555 struct thread *thread __used)
fbf94829 1556{
1557 if (verbose)
1558 printf("sched_exit event %p\n", event);
1559}
1560
55ffb7a6 1561static void
b3165f41 1562process_sched_migrate_task_event(void *data, struct perf_session *session,
1563 struct event *event,
1564 int cpu __used,
1565 u64 timestamp __used,
1566 struct thread *thread __used)
1567{
1568 struct trace_migrate_task_event migrate_task_event;
1569
f48f669d 1570 FILL_COMMON_FIELDS(migrate_task_event, event, data);
55ffb7a6 1571
1572 FILL_ARRAY(migrate_task_event, comm, event, data);
1573 FILL_FIELD(migrate_task_event, pid, event, data);
1574 FILL_FIELD(migrate_task_event, prio, event, data);
1575 FILL_FIELD(migrate_task_event, cpu, event, data);
1576
1577 if (trace_handler->migrate_task_event)
1578 trace_handler->migrate_task_event(&migrate_task_event, session,
1579 event, cpu, timestamp, thread);
1580}
1581
ec156764 1582static void
1583process_raw_event(event_t *raw_event __used, struct perf_session *session,
1584 void *data, int cpu, u64 timestamp, struct thread *thread)
ec156764 1585{
1586 struct event *event;
1587 int type;
1588
d8bd9e0a 1589
f48f669d 1590 type = trace_parse_common_type(data);
1591 event = trace_find_event(type);
1592
ec156764 1593 if (!strcmp(event->name, "sched_switch"))
b3165f41 1594 process_sched_switch_event(data, session, event, cpu, timestamp, thread);
39aeb52f 1595 if (!strcmp(event->name, "sched_stat_runtime"))
b3165f41 1596 process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
ec156764 1597 if (!strcmp(event->name, "sched_wakeup"))
b3165f41 1598 process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
fbf94829 1599 if (!strcmp(event->name, "sched_wakeup_new"))
b3165f41 1600 process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
fbf94829 1601 if (!strcmp(event->name, "sched_process_fork"))
f48f669d 1602 process_sched_fork_event(data, event, cpu, timestamp, thread);
1603 if (!strcmp(event->name, "sched_process_exit"))
1604 process_sched_exit_event(event, cpu, timestamp, thread);
55ffb7a6 1605 if (!strcmp(event->name, "sched_migrate_task"))
b3165f41 1606 process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
1607}
1608
b3165f41 1609static int process_sample_event(event_t *event, struct perf_session *session)
0a02ad93 1610{
180f95e2 1611 struct sample_data data;
0a02ad93 1612 struct thread *thread;
a80deb62 1613
c019879b 1614 if (!(session->sample_type & PERF_SAMPLE_RAW))
a80deb62 1615 return 0;
0a02ad93 1616
1617 memset(&data, 0, sizeof(data));
1618 data.time = -1;
1619 data.cpu = -1;
1620 data.period = -1;
0a02ad93 1621
c019879b 1622 event__parse_sample(event, session->sample_type, &data);
0a02ad93 1623
1624 dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
1625 data.pid, data.tid, data.ip, data.period);
0a02ad93 1626
b3165f41 1627 thread = perf_session__findnew(session, data.pid);
0a02ad93 1628 if (thread == NULL) {
1629 pr_debug("problem processing %d event, skipping it.\n",
1630 event->header.type);
1631 return -1;
1632 }
1633
1634 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
1635
180f95e2 1636 if (profile_cpu != -1 && profile_cpu != (int)data.cpu)
1637 return 0;
1638
b3165f41 1639 process_raw_event(event, session, data.raw_data, data.cpu, data.time, thread);
1640
1641 return 0;
1642}
1643
301a0b02 1644static struct perf_event_ops event_ops = {
1645 .sample = process_sample_event,
1646 .comm = event__process_comm,
cee75ac7 1647 .lost = event__process_lost,
af64865b 1648 .fork = event__process_task,
a64eae70 1649 .ordered_samples = true,
1650};
1651
46f392c9 1652static int read_events(void)
0a02ad93 1653{
d549c769 1654 int err = -EINVAL;
454c407e 1655 struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false);
1656 if (session == NULL)
1657 return -ENOMEM;
1658
cee75ac7 1659 if (perf_session__has_traces(session, "record -R")) {
d549c769 1660 err = perf_session__process_events(session, &event_ops);
1661 nr_events = session->hists.stats.nr_events[0];
1662 nr_lost_events = session->hists.stats.total_lost;
1663 nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
1664 }
d549c769 1665
1666 perf_session__delete(session);
1667 return err;
1668}
1669
1670static void print_bad_events(void)
1671{
1672 if (nr_unordered_timestamps && nr_timestamps) {
1673 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
1674 (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
1675 nr_unordered_timestamps, nr_timestamps);
1676 }
1677 if (nr_lost_events && nr_events) {
1678 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1679 (double)nr_lost_events/(double)nr_events*100.0,
1680 nr_lost_events, nr_events, nr_lost_chunks);
1681 }
1682 if (nr_state_machine_bugs && nr_timestamps) {
1683 printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
1684 (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
1685 nr_state_machine_bugs, nr_timestamps);
1686 if (nr_lost_events)
1687 printf(" (due to lost events?)");
1688 printf("\n");
1689 }
1690 if (nr_context_switch_bugs && nr_timestamps) {
1691 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
1692 (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
1693 nr_context_switch_bugs, nr_timestamps);
1694 if (nr_lost_events)
1695 printf(" (due to lost events?)");
1696 printf("\n");
1697 }
1698}
1699
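/*
 * 'perf sched latency': read the trace, re-sort the per-thread atoms
 * by the selected keys and print one line per thread with its total
 * runtime, number of switches, and average/maximum wakeup latency.
 */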
1700static void __cmd_lat(void)
1701{
1702 struct rb_node *next;
1703
1704 setup_pager();
1705 read_events();
1706 sort_lat();
1707
1708 printf("\n ---------------------------------------------------------------------------------------------------------------\n");
1709 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
1710 printf(" ---------------------------------------------------------------------------------------------------------------\n");
1711
1712 next = rb_first(&sorted_atom_root);
1713
1714 while (next) {
1715 struct work_atoms *work_list;
1716
1717 work_list = rb_entry(next, struct work_atoms, node);
1718 output_lat_thread(work_list);
1719 next = rb_next(next);
1720 }
1721
1722 printf(" -----------------------------------------------------------------------------------------\n");
1723 printf(" TOTAL: |%11.3f ms |%9Ld |\n",
1724 (double)all_runtime/1e6, all_count);
1725
1726 printf(" ---------------------------------------------------\n");
1727
1728 print_bad_events();
1729 printf("\n");
1730
1731}
1732
1733static struct trace_sched_handler map_ops = {
1734 .wakeup_event = NULL,
1735 .switch_event = map_switch_event,
1736 .runtime_event = NULL,
1737 .fork_event = NULL,
1738};
1739
1740static void __cmd_map(void)
1741{
1742 max_cpu = sysconf(_SC_NPROCESSORS_CONF);
1743
1744 setup_pager();
1745 read_events();
1746 print_bad_events();
1747}
1748
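/*
 * 'perf sched replay': recreate each traced task as a real thread and
 * re-execute its recorded run/sleep/wakeup atoms, repeating the whole
 * workload replay_repeat times and printing per-run timings.
 */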
1749static void __cmd_replay(void)
1750{
1751 unsigned long i;
1752
1753 calibrate_run_measurement_overhead();
1754 calibrate_sleep_measurement_overhead();
1755
1756 test_calibrations();
1757
1758 read_events();
1759
1760 printf("nr_run_events: %ld\n", nr_run_events);
1761 printf("nr_sleep_events: %ld\n", nr_sleep_events);
1762 printf("nr_wakeup_events: %ld\n", nr_wakeup_events);
1763
1764 if (targetless_wakeups)
1765 printf("target-less wakeups: %ld\n", targetless_wakeups);
1766 if (multitarget_wakeups)
1767 printf("multi-target wakeups: %ld\n", multitarget_wakeups);
1768 if (nr_run_events_optimized)
1769 printf("run atoms optimized: %ld\n",
1770 nr_run_events_optimized);
1771
1772 print_task_traces();
1773 add_cross_task_wakeups();
1774
1775 create_tasks();
1776 printf("------------------------------------------------------------\n");
1777 for (i = 0; i < replay_repeat; i++)
1778 run_one_test();
1779}
1780
1781
46f392c9 1782static const char * const sched_usage[] = {
4b77a729 1783 "perf sched [<options>] {record|latency|map|replay|trace}",
1784 NULL
1785};
1786
f2858d8a 1787static const struct option sched_options[] = {
1788 OPT_STRING('i', "input", &input_name, "file",
1789 "input file name"),
c0555642 1790 OPT_INCR('v', "verbose", &verbose,
f2858d8a 1791 "be more verbose (show symbol address, etc)"),
1792 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1793 "dump raw trace in ASCII"),
1794 OPT_END()
1795};
1796
1797static const char * const latency_usage[] = {
1798 "perf sched latency [<options>]",
1799 NULL
1800};
1801
1802static const struct option latency_options[] = {
1803 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1804 "sort by key(s): runtime, switch, avg, max"),
c0555642 1805 OPT_INCR('v', "verbose", &verbose,
0a02ad93 1806 "be more verbose (show symbol address, etc)"),
1807 OPT_INTEGER('C', "CPU", &profile_cpu,
1808 "CPU to profile on"),
1809 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1810 "dump raw trace in ASCII"),
1811 OPT_END()
1812};
1813
1814static const char * const replay_usage[] = {
1815 "perf sched replay [<options>]",
1816 NULL
1817};
1818
1819static const struct option replay_options[] = {
1820 OPT_UINTEGER('r', "repeat", &replay_repeat,
1821 "repeat the workload replay N times (-1: infinite)"),
c0555642 1822 OPT_INCR('v', "verbose", &verbose,
1823 "be more verbose (show symbol address, etc)"),
1824 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1825 "dump raw trace in ASCII"),
1826 OPT_END()
1827};
1828
1829static void setup_sorting(void)
1830{
1831 char *tmp, *tok, *str = strdup(sort_order);
1832
1833 for (tok = strtok_r(str, ", ", &tmp);
1834 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1835 if (sort_dimension__add(tok, &sort_list) < 0) {
1836 error("Unknown --sort key: `%s'", tok);
f2858d8a 1837 usage_with_options(latency_usage, latency_options);
1838 }
1839 }
1840
1841 free(str);
1842
cbef79a8 1843 sort_dimension__add("pid", &cmp_pid);
1844}
1845
1846static const char *record_args[] = {
1847 "record",
1848 "-a",
1849 "-R",
ea57c4f5 1850 "-f",
dc02bf71 1851 "-m", "1024",
1852 "-c", "1",
1853 "-e", "sched:sched_switch:r",
1854 "-e", "sched:sched_stat_wait:r",
1855 "-e", "sched:sched_stat_sleep:r",
1856 "-e", "sched:sched_stat_iowait:r",
ea57c4f5 1857 "-e", "sched:sched_stat_runtime:r",
1858 "-e", "sched:sched_process_exit:r",
1859 "-e", "sched:sched_process_fork:r",
1860 "-e", "sched:sched_wakeup:r",
1861 "-e", "sched:sched_migrate_task:r",
1862};
1863
1864static int __cmd_record(int argc, const char **argv)
1865{
1866 unsigned int rec_argc, i, j;
1867 const char **rec_argv;
1868
1869 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1870 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1871
1872 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1873 rec_argv[i] = strdup(record_args[i]);
1874
1875 for (j = 1; j < (unsigned int)argc; j++, i++)
1876 rec_argv[i] = argv[j];
1877
1878 BUG_ON(i != rec_argc);
1879
1880 return cmd_record(i, rec_argv, NULL);
1881}
1882
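/*
 * Sub-command dispatch: 'record' forwards to perf record with the
 * scheduler tracepoints listed above, 'latency', 'map' and 'replay'
 * select the matching trace_sched_handler before processing the
 * events, and 'trace' is aliased to 'perf trace'.
 */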
1883int cmd_sched(int argc, const char **argv, const char *prefix __used)
1884{
1885 argc = parse_options(argc, argv, sched_options, sched_usage,
1886 PARSE_OPT_STOP_AT_NON_OPTION);
1887 if (!argc)
1888 usage_with_options(sched_usage, sched_options);
0a02ad93 1889
1890 /*
1891 * Aliased to 'perf trace' for now:
1892 */
1893 if (!strcmp(argv[0], "trace"))
1894 return cmd_trace(argc, argv, prefix);
1895
75be6cf4 1896 symbol__init();
1897 if (!strncmp(argv[0], "rec", 3)) {
1898 return __cmd_record(argc, argv);
1899 } else if (!strncmp(argv[0], "lat", 3)) {
cdce9d73 1900 trace_handler = &lat_ops;
1901 if (argc > 1) {
1902 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1903 if (argc)
1904 usage_with_options(latency_usage, latency_options);
f2858d8a 1905 }
b5fae128 1906 setup_sorting();
46f392c9 1907 __cmd_lat();
1908 } else if (!strcmp(argv[0], "map")) {
1909 trace_handler = &map_ops;
1910 setup_sorting();
1911 __cmd_map();
1912 } else if (!strncmp(argv[0], "rep", 3)) {
1913 trace_handler = &replay_ops;
1914 if (argc) {
1915 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
1916 if (argc)
1917 usage_with_options(replay_usage, replay_options);
1918 }
1919 __cmd_replay();
1920 } else {
1921 usage_with_options(sched_usage, sched_options);
1922 }
1923
ec156764 1924 return 0;
0a02ad93 1925}