kernel/trace/trace_clock.c
/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *	- local: CPU-local trace clock
 *	- medium: scalable global clock with some jitter
 *	- global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks; an
 * illustrative selection sketch appears at the end of this file.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

#include "trace.h"

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;
	int resched;

	/*
	 * sched_clock() is an architecture-implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	resched = ftrace_preempt_disable();
	clock = sched_clock();
	ftrace_preempt_enable(resched);

	return clock;
}
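
/*
 * Illustrative sketch only (not part of the original file): because
 * trace_clock_local() timestamps are only comparable when taken on the
 * same CPU, a hypothetical caller would pin itself for the duration of
 * the measurement, e.g. by disabling preemption. The "example_*" name
 * below is made up for illustration.
 */
#if 0
static u64 example_local_delta_ns(void (*fn)(void))
{
	u64 t0, t1;

	preempt_disable();		/* keep both samples on one CPU */
	t0 = trace_clock_local();
	fn();
	t1 = trace_clock_local();
	preempt_enable();

	return t1 - t0;			/* ns, only as good as sched_clock() */
}
#endif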

/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock() (cpu_clock() for the calling CPU), which
 * will allow at most ~1 jiffy of jitter between CPUs. So it's a pretty
 * scalable clock, but there can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
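
/*
 * Illustrative sketch only (not part of the original file): one way to
 * observe the cross-CPU offsets mentioned above is to sample trace_clock()
 * on every CPU via IPI and compare the results. Note the observed spread
 * includes IPI delivery skew on top of the clock's own jitter, so this is
 * only a rough upper bound. All "example_*" names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(u64, example_stamp);

static void example_sample(void *unused)
{
	/* runs in IPI context, with preemption implicitly disabled */
	per_cpu(example_stamp, smp_processor_id()) = trace_clock();
}

static void example_show_spread(void)
{
	u64 lo = ~0ULL, hi = 0, t;
	int cpu;

	on_each_cpu(example_sample, NULL, 1);	/* wait for all CPUs */

	for_each_online_cpu(cpu) {
		t = per_cpu(example_stamp, cpu);
		if (t < lo)
			lo = t;
		if (t > hi)
			hi = t;
	}
	pr_info("trace_clock() cross-CPU spread: %llu ns\n", hi - lo);
}
#endif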


/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD-derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
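
/*
 * Illustrative sketch only (not part of the original file): a tracer
 * plugin could choose among the three variants through a function
 * pointer, trading speed for coherency. The real ftrace core does
 * something similar with its "trace_clock" option in
 * kernel/trace/trace.c; the "example_*" names below are hypothetical.
 */
#if 0
struct example_clock {
	const char	*name;
	u64		(*func)(void);
};

static struct example_clock example_clocks[] = {
	{ "local",	trace_clock_local },	/* fastest, per-CPU only */
	{ "medium",	trace_clock },		/* scalable, ~1 jiffy jitter */
	{ "global",	trace_clock_global },	/* coherent, serialized */
};

static u64 (*example_active_clock)(void) = trace_clock_local;

static u64 example_timestamp(void)
{
	return example_active_clock();
}
#endif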