/*
 * tracing clocks
 *
 * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

#include "trace.h"

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}

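/*
 * Illustrative sketch, not part of the original file: one way a caller
 * could use trace_clock_local() to time a CPU-local section of code.
 * The helper name trace_clock_local_delta() is hypothetical. The delta
 * is only meaningful if fn() does not migrate to another CPU, since
 * trace_clock_local() is not coherent across CPUs.
 */
static u64 notrace trace_clock_local_delta(void (*fn)(void))
{
	u64 start = trace_clock_local();

	fn();

	/* Assumes both reads happened on the same CPU. */
	return trace_clock_local() - start;
}
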
/*
 * trace_clock(): 'in-between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}

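/*
 * Illustrative sketch, not part of the original file: because
 * trace_clock() tolerates up to ~1 jiffy of inter-CPU jitter, two
 * timestamps taken on different CPUs only establish a definite
 * ordering when they differ by more than that bound. The helper name
 * is hypothetical; TICK_NSEC (one jiffy in nanoseconds) would come
 * from <linux/jiffies.h>.
 */
static bool trace_clock_definitely_before(u64 a, u64 b)
{
	/* True only if 'a' precedes 'b' by more than the jitter bound. */
	return (s64)(b - a) > (s64)TICK_NSEC;
}
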
/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = cpu_clock(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
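
/*
 * Illustrative sketch, not part of the original file: how a tracer
 * might expose the three variants behind one selectable interface,
 * loosely modeled on the trace_clocks[] table in the tracing core.
 * The struct and array names here are hypothetical.
 */
struct example_trace_clock {
	const char	*name;
	u64		(*func)(void);
};

static struct example_trace_clock example_trace_clocks[] = {
	{ "local",	trace_clock_local },	/* fastest, CPU-local only */
	{ "medium",	trace_clock },		/* scalable, ~1 jiffy jitter */
	{ "global",	trace_clock_global },	/* serialized, monotonic */
};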