/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from the list, it
 * is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}


/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			return preferred_cpu;
	}
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to move the timer to new_base.
		 * However we can't change the timer's base while it is
		 * running, so we keep it on the same CPU. No hassle vs.
		 * reprogramming the event source in the high resolution
		 * case. The softirq code will take care of this when the
		 * timer function has completed. There is no conflict as
		 * we hold the lock until the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			spin_unlock(&new_base->cpu_base->lock);
			spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of @kt and @nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG < 64 */
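
/*
 * Editor's note -- a worked example of the shift trick above (assumed
 * values, not from the original source): dividing kt = 10^19 ns by
 * div = 2^34 would overflow do_div(), which requires a 32-bit divisor.
 * The loop shifts div right three times (sft = 3) until it fits, and
 * the dividend is shifted by the same amount:
 *
 *	div:  2^34 -> 2^33 -> 2^32 -> 2^31	(now div >> 32 == 0)
 *	dclc: 10^19 >> 3 = 1250000000000000000
 *	do_div(1250000000000000000, 2^31) ~= 582076609
 *
 * which matches the exact quotient 10^19 / 2^34 up to the precision
 * lost by discarding the shifted-out low bits.
 */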

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
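
/*
 * Editor's note: given the parser above, high resolution mode can be
 * disabled at boot time by passing "highres=off" on the kernel command
 * line. It is enabled by default (hrtimer_hres_enabled = 1), so
 * "highres=on" merely makes the default explicit.
 */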

/*
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source after checking both queues for the
 * next event.
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback, or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * now objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME when the event is in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}


/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}


/*
 * When high resolution timers are active, try to reprogram. Note that in
 * case the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
 * expiry check happens. The timer gets enqueued into the rbtree. The
 * reprogramming and expiry check is done in the hrtimer_interrupt or in
 * the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		if (wakeup) {
			spin_unlock(&base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			spin_lock(&base->cpu_base->lock);
		} else
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);

		return 1;
	}

	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
		       "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
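
/*
 * Editor's note -- an illustrative sketch (hypothetical names, not part
 * of this file) of the canonical use of hrtimer_forward() from a timer
 * callback: the expiry is pushed forward past 'now' in whole intervals,
 * and the timer is rearmed by returning HRTIMER_RESTART. The return
 * value is the number of overruns, i.e. how many intervals were skipped
 * if the callback ran late:
 *
 *	static ktime_t example_period;
 *
 *	static enum hrtimer_restart example_tick(struct hrtimer *timer)
 *	{
 *		u64 overruns;
 *
 *		overruns = hrtimer_forward(timer, timer->base->get_time(),
 *					   example_period);
 *		return HRTIMER_RESTART;
 *	}
 */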

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	debug_activate(timer);

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (hrtimer_get_expires_tv64(timer) <
		    hrtimer_get_expires_tv64(entry)) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer into the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost)
		base->first = &timer->node;

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return leftmost;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	if (timer->state & HRTIMER_STATE_ENQUEUED) {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			     unsigned long delta_ns, const enum hrtimer_mode mode,
			     int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
		hrtimer_enqueue_reprogram(timer, new_base, wakeup);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			   unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);
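
/*
 * Editor's note -- a minimal start sketch (hypothetical names, not part
 * of this file): hrtimer_init() selects the clock and mode, the caller
 * sets the function pointer, and hrtimer_start() arms the timer. The
 * callback runs with interrupts disabled, as __run_hrtimer() below
 * documents:
 *
 *	static struct hrtimer example_timer;
 *
 *	static enum hrtimer_restart example_fn(struct hrtimer *t)
 *	{
 *		return HRTIMER_NORESTART;	(one-shot: do not rearm)
 *	}
 *
 *	static void example_arm(void)
 *	{
 *		hrtimer_init(&example_timer, CLOCK_MONOTONIC,
 *			     HRTIMER_MODE_REL);
 *		example_timer.function = example_fn;
 *		hrtimer_start(&example_timer,
 *			      ktime_set(0, 500 * NSEC_PER_USEC),
 *			      HRTIMER_MODE_REL);
 *	}
 */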

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

static int force_clock_reprogram;

/*
 * After five failed attempts, we consider that hrtimer_interrupt() is
 * hanging, which can happen when something slows down the interrupt,
 * such as tracing. We then force the clock reprogramming for each
 * future hrtimer interrupt, to avoid infinite loops, and overwrite the
 * min_delta_ns threshold accordingly.
 * The next tick event will be scheduled at 3 times the time we
 * currently spend in hrtimer_interrupt(). This is a good compromise:
 * the cpus will spend at most 1/4 of their time processing hrtimer
 * interrupts, which is enough to let the system run without serious
 * starvation.
 */

static inline void
hrtimer_interrupt_hanging(struct clock_event_device *dev,
			  ktime_t try_time)
{
	force_clock_reprogram = 1;
	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
	printk(KERN_WARNING "hrtimer: interrupt too slow, "
	       "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
}
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int nr_retries = 0;
	int i;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	/* 5 retries is enough to notice a hang */
	if (!(++nr_retries % 5))
		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));

	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	spin_lock(&cpu_base->lock);
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */

			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
		base++;
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, force_clock_reprogram))
			goto retry;
	}
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

/**
 * hrtimer_peek_ahead_timers -- run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expires time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 *
 */
void hrtimer_peek_ahead_timers(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__hrtimer_peek_ahead_timers();
	local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	hrtimer_peek_ahead_timers();
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();
}

/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct rb_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];

		if (!base->first)
			continue;

		if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		spin_lock(&cpu_base->lock);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <=
			    hrtimer_get_expires_tv64(timer))
				break;

			__run_hrtimer(timer, &base->softirq_time);
		}
		spin_unlock(&cpu_base->lock);
	}
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
			      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	unsigned long slack;

	slack = current->timer_slack_ns;
	if (rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.index = t.timer.base->index;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}

static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	/* Check if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	{
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
		migrate_hrtimers(scpu);
		break;
	}
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than
 * @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
				     const enum hrtimer_mode mode)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
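
/*
 * Editor's note -- an illustrative caller (hypothetical names, not part
 * of this file): the task state must be set before calling, as the
 * comment above requires, and the slack argument is in nanoseconds:
 *
 *	static int example_wait(void)
 *	{
 *		ktime_t timeout = ktime_set(1, 0);	(one second)
 *
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		return schedule_hrtimeout_range(&timeout,
 *						100 * NSEC_PER_USEC,
 *						HRTIMER_MODE_REL);
 *	}
 *
 * This returns 0 if the (slack-extended) timeout elapsed and -EINTR if
 * a signal woke the task early.
 */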

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);