1 | /* | |
2 | * linux/kernel/hrtimer.c | |
3 | * | |
4 | * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> | |
5 | * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar | |
6 | * Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner | |
7 | * | |
8 | * High-resolution kernel timers | |
9 | * | |
10 | * In contrast to the low-resolution timeout API implemented in | |
11 | * kernel/timer.c, hrtimers provide finer resolution and accuracy | |
12 | * depending on system configuration and capabilities. | |
13 | * | |
14 | * These timers are currently used for: | |
15 | * - itimers | |
16 | * - POSIX timers | |
17 | * - nanosleep | |
18 | * - precise in-kernel timing | |
19 | * | |
20 | * Started by: Thomas Gleixner and Ingo Molnar | |
21 | * | |
22 | * Credits: | |
23 | * based on kernel/timer.c | |
24 | * | |
25 | * Help, testing, suggestions, bugfixes, improvements were | |
26 | * provided by: | |
27 | * | |
28 | * George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel | |
29 | * et al. | |
30 | * | |
31 | * For licensing details see kernel-base/COPYING | |
32 | */ | |
33 | ||
34 | #include <linux/cpu.h> | |
35 | #include <linux/module.h> | |
36 | #include <linux/percpu.h> | |
37 | #include <linux/hrtimer.h> | |
38 | #include <linux/notifier.h> | |
39 | #include <linux/syscalls.h> | |
40 | #include <linux/kallsyms.h> | |
41 | #include <linux/interrupt.h> | |
42 | #include <linux/tick.h> | |
43 | #include <linux/seq_file.h> | |
44 | #include <linux/err.h> | |
45 | #include <linux/debugobjects.h> | |
46 | #include <linux/sched.h> | |
47 | #include <linux/timer.h> | |
48 | ||
49 | #include <asm/uaccess.h> | |
50 | ||
51 | #include <trace/events/timer.h> | |
52 | ||
53 | /* | |
54 | * The timer bases: | |
55 | * | |
56 | * Note: If we want to add new timer bases, we have to skip the two | |
57 | * clock ids captured by the cpu-timers. We do this by holding empty | |
58 | * entries rather than doing math adjustment of the clock ids. | |
59 | * This ensures that we capture erroneous accesses to these clock ids | |
60 | * rather than moving them into the range of valid clock id's. | |
61 | */ | |
62 | DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = | |
63 | { | |
64 | ||
65 | .clock_base = | |
66 | { | |
67 | { | |
68 | .index = CLOCK_REALTIME, | |
69 | .get_time = &ktime_get_real, | |
70 | .resolution = KTIME_LOW_RES, | |
71 | }, | |
72 | { | |
73 | .index = CLOCK_MONOTONIC, | |
74 | .get_time = &ktime_get, | |
75 | .resolution = KTIME_LOW_RES, | |
76 | }, | |
77 | } | |
78 | }; | |
79 | ||
80 | /* | |
81 | * Get the coarse grained time at the softirq based on xtime and | |
82 | * wall_to_monotonic. | |
83 | */ | |
84 | static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) | |
85 | { | |
86 | ktime_t xtim, tomono; | |
87 | struct timespec xts, tom; | |
88 | unsigned long seq; | |
89 | ||
90 | do { | |
91 | seq = read_seqbegin(&xtime_lock); | |
92 | xts = __current_kernel_time(); | |
93 | tom = __get_wall_to_monotonic(); | |
94 | } while (read_seqretry(&xtime_lock, seq)); | |
95 | ||
96 | xtim = timespec_to_ktime(xts); | |
97 | tomono = timespec_to_ktime(tom); | |
98 | base->clock_base[CLOCK_REALTIME].softirq_time = xtim; | |
99 | base->clock_base[CLOCK_MONOTONIC].softirq_time = | |
100 | ktime_add(xtim, tomono); | |
101 | } | |
102 | ||
103 | /* | |
104 | * Functions and macros which are different for UP/SMP systems are kept in a | |
105 | * single place | |
106 | */ | |
107 | #ifdef CONFIG_SMP | |
108 | ||
109 | /* | |
110 | * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock | |
111 | * means that all timers which are tied to this base via timer->base are | |
112 | * locked, and the base itself is locked too. | |
113 | * | |
114 | * So __run_timers/migrate_timers can safely modify all timers which could | |
115 | * be found on the lists/queues. | |
116 | * | |
117 | * When the timer's base is locked, and the timer removed from list, it is | |
118 | * possible to set timer->base = NULL and drop the lock: the timer remains | |
119 | * locked. | |
120 | */ | |
121 | static | |
122 | struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, | |
123 | unsigned long *flags) | |
124 | { | |
125 | struct hrtimer_clock_base *base; | |
126 | ||
127 | for (;;) { | |
128 | base = timer->base; | |
129 | if (likely(base != NULL)) { | |
130 | raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); | |
131 | if (likely(base == timer->base)) | |
132 | return base; | |
133 | /* The timer has migrated to another CPU: */ | |
134 | raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); | |
135 | } | |
136 | cpu_relax(); | |
137 | } | |
138 | } | |
139 | ||
140 | ||
141 | /* | |
142 | * Get the preferred target CPU for NOHZ | |
143 | */ | |
144 | static int hrtimer_get_target(int this_cpu, int pinned) | |
145 | { | |
146 | #ifdef CONFIG_NO_HZ | |
147 | if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) | |
148 | return get_nohz_timer_target(); | |
149 | #endif | |
150 | return this_cpu; | |
151 | } | |
152 | ||
153 | /* | |
154 | * With HIGHRES=y we do not migrate the timer when it is expiring | |
155 | * before the next event on the target cpu because we cannot reprogram | |
156 | * the target cpu hardware and we would cause it to fire late. | |
157 | * | |
158 | * Called with cpu_base->lock of target cpu held. | |
159 | */ | |
160 | static int | |
161 | hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) | |
162 | { | |
163 | #ifdef CONFIG_HIGH_RES_TIMERS | |
164 | ktime_t expires; | |
165 | ||
166 | if (!new_base->cpu_base->hres_active) | |
167 | return 0; | |
168 | ||
169 | expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); | |
170 | return expires.tv64 <= new_base->cpu_base->expires_next.tv64; | |
171 | #else | |
172 | return 0; | |
173 | #endif | |
174 | } | |
175 | ||
176 | /* | |
177 | * Switch the timer base to the current CPU when possible. | |
178 | */ | |
179 | static inline struct hrtimer_clock_base * | |
180 | switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base, | |
181 | int pinned) | |
182 | { | |
183 | struct hrtimer_clock_base *new_base; | |
184 | struct hrtimer_cpu_base *new_cpu_base; | |
185 | int this_cpu = smp_processor_id(); | |
186 | int cpu = hrtimer_get_target(this_cpu, pinned); | |
187 | ||
188 | again: | |
189 | new_cpu_base = &per_cpu(hrtimer_bases, cpu); | |
190 | new_base = &new_cpu_base->clock_base[base->index]; | |
191 | ||
192 | if (base != new_base) { | |
193 | /* | |
194 | * We are trying to move timer to new_base. | |
195 | * However we can't change timer's base while it is running, | |
196 | * so we keep it on the same CPU. No hassle vs. reprogramming | |
197 | * the event source in the high resolution case. The softirq | |
198 | * code will take care of this when the timer function has | |
199 | * completed. There is no conflict as we hold the lock until | |
200 | * the timer is enqueued. | |
201 | */ | |
202 | if (unlikely(hrtimer_callback_running(timer))) | |
203 | return base; | |
204 | ||
205 | /* See the comment in lock_timer_base() */ | |
206 | timer->base = NULL; | |
207 | raw_spin_unlock(&base->cpu_base->lock); | |
208 | raw_spin_lock(&new_base->cpu_base->lock); | |
209 | ||
210 | if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { | |
211 | cpu = this_cpu; | |
212 | raw_spin_unlock(&new_base->cpu_base->lock); | |
213 | raw_spin_lock(&base->cpu_base->lock); | |
214 | timer->base = base; | |
215 | goto again; | |
216 | } | |
217 | timer->base = new_base; | |
218 | } | |
219 | return new_base; | |
220 | } | |
221 | ||
222 | #else /* CONFIG_SMP */ | |
223 | ||
224 | static inline struct hrtimer_clock_base * | |
225 | lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) | |
226 | { | |
227 | struct hrtimer_clock_base *base = timer->base; | |
228 | ||
229 | raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); | |
230 | ||
231 | return base; | |
232 | } | |
233 | ||
234 | # define switch_hrtimer_base(t, b, p) (b) | |
235 | ||
236 | #endif /* !CONFIG_SMP */ | |
237 | ||
238 | /* | |
239 | * Functions for the union type storage format of ktime_t which are | |
240 | * too large for inlining: | |
241 | */ | |
242 | #if BITS_PER_LONG < 64 | |
243 | # ifndef CONFIG_KTIME_SCALAR | |
244 | /** | |
245 | * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable | |
246 | * @kt: addend | |
247 | * @nsec: the scalar nsec value to add | |
248 | * | |
249 | * Returns the sum of kt and nsec in ktime_t format | |
250 | */ | |
251 | ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) | |
252 | { | |
253 | ktime_t tmp; | |
254 | ||
255 | if (likely(nsec < NSEC_PER_SEC)) { | |
256 | tmp.tv64 = nsec; | |
257 | } else { | |
258 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); | |
259 | ||
260 | tmp = ktime_set((long)nsec, rem); | |
261 | } | |
262 | ||
263 | return ktime_add(kt, tmp); | |
264 | } | |
265 | ||
266 | EXPORT_SYMBOL_GPL(ktime_add_ns); | |
267 | ||
268 | /** | |
269 | * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable | |
270 | * @kt: minuend | |
271 | * @nsec: the scalar nsec value to subtract | |
272 | * | |
273 | * Returns the subtraction of @nsec from @kt in ktime_t format | |
274 | */ | |
275 | ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec) | |
276 | { | |
277 | ktime_t tmp; | |
278 | ||
279 | if (likely(nsec < NSEC_PER_SEC)) { | |
280 | tmp.tv64 = nsec; | |
281 | } else { | |
282 | unsigned long rem = do_div(nsec, NSEC_PER_SEC); | |
283 | ||
284 | tmp = ktime_set((long)nsec, rem); | |
285 | } | |
286 | ||
287 | return ktime_sub(kt, tmp); | |
288 | } | |
289 | ||
290 | EXPORT_SYMBOL_GPL(ktime_sub_ns); | |
291 | # endif /* !CONFIG_KTIME_SCALAR */ | |
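/*
 * Illustrative sketch (not part of the original file): what ktime_add_ns()
 * computes on a 32-bit ktime_t. A nanosecond value of one second or more is
 * first split into a normalized sec/nsec pair, so the result stays in
 * canonical form. Variable names below are hypothetical.
 */
#if 0
	ktime_t base = ktime_set(10, 0);			/* 10 s */
	ktime_t later = ktime_add_ns(base, 1500000000ULL);	/* +1.5 s */
	/* later now holds 11 s, 500000000 ns */
#endif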
292 | ||
293 | /* | |
294 | * Divide a ktime value by a nanosecond value | |
295 | */ | |
296 | u64 ktime_divns(const ktime_t kt, s64 div) | |
297 | { | |
298 | u64 dclc; | |
299 | int sft = 0; | |
300 | ||
301 | dclc = ktime_to_ns(kt); | |
302 | /* Make sure the divisor is less than 2^32: */ | |
303 | while (div >> 32) { | |
304 | sft++; | |
305 | div >>= 1; | |
306 | } | |
307 | dclc >>= sft; | |
308 | do_div(dclc, (unsigned long) div); | |
309 | ||
310 | return dclc; | |
311 | } | |
312 | #endif /* BITS_PER_LONG < 64 */ | |
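/*
 * Worked example (illustrative, not part of the original file): dividing a
 * 5 second delta by a 2 second interval. The divisor already fits in 32
 * bits, so no shift is applied and the result is exact; only divisors of
 * roughly 4.3 seconds or more (>= 2^32 ns) trigger the shift loop, which
 * trades a little precision in the dividend for a 32-bit do_div().
 */
#if 0
	ktime_t delta = ktime_set(5, 0);
	u64 overruns = ktime_divns(delta, 2000000000LL);	/* == 2 */
#endif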
313 | ||
314 | /* | |
315 | * Add two ktime values and do a safety check for overflow: | |
316 | */ | |
317 | ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs) | |
318 | { | |
319 | ktime_t res = ktime_add(lhs, rhs); | |
320 | ||
321 | /* | |
322 | * We use KTIME_SEC_MAX here, the maximum timeout which we can | |
323 | * return to user space in a timespec: | |
324 | */ | |
325 | if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64) | |
326 | res = ktime_set(KTIME_SEC_MAX, 0); | |
327 | ||
328 | return res; | |
329 | } | |
330 | ||
331 | EXPORT_SYMBOL_GPL(ktime_add_safe); | |
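/*
 * Illustrative sketch (not part of the original file): why the clamp
 * matters. Adding the current time to a huge user-supplied relative timeout
 * could wrap into a negative (already expired) value; ktime_add_safe()
 * saturates at KTIME_SEC_MAX instead, which effectively means "never".
 */
#if 0
	ktime_t huge = ktime_set(KTIME_SEC_MAX, 0);
	ktime_t abs_expiry = ktime_add_safe(ktime_get(), huge);
	/* abs_expiry is clamped to KTIME_SEC_MAX seconds, not wrapped */
#endif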
332 | ||
333 | #ifdef CONFIG_DEBUG_OBJECTS_TIMERS | |
334 | ||
335 | static struct debug_obj_descr hrtimer_debug_descr; | |
336 | ||
337 | /* | |
338 | * fixup_init is called when: | |
339 | * - an active object is initialized | |
340 | */ | |
341 | static int hrtimer_fixup_init(void *addr, enum debug_obj_state state) | |
342 | { | |
343 | struct hrtimer *timer = addr; | |
344 | ||
345 | switch (state) { | |
346 | case ODEBUG_STATE_ACTIVE: | |
347 | hrtimer_cancel(timer); | |
348 | debug_object_init(timer, &hrtimer_debug_descr); | |
349 | return 1; | |
350 | default: | |
351 | return 0; | |
352 | } | |
353 | } | |
354 | ||
355 | /* | |
356 | * fixup_activate is called when: | |
357 | * - an active object is activated | |
358 | * - an unknown object is activated (might be a statically initialized object) | |
359 | */ | |
360 | static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state) | |
361 | { | |
362 | switch (state) { | |
363 | ||
364 | case ODEBUG_STATE_NOTAVAILABLE: | |
365 | WARN_ON_ONCE(1); | |
366 | return 0; | |
367 | ||
368 | case ODEBUG_STATE_ACTIVE: | |
369 | WARN_ON(1); | |
370 | ||
371 | default: | |
372 | return 0; | |
373 | } | |
374 | } | |
375 | ||
376 | /* | |
377 | * fixup_free is called when: | |
378 | * - an active object is freed | |
379 | */ | |
380 | static int hrtimer_fixup_free(void *addr, enum debug_obj_state state) | |
381 | { | |
382 | struct hrtimer *timer = addr; | |
383 | ||
384 | switch (state) { | |
385 | case ODEBUG_STATE_ACTIVE: | |
386 | hrtimer_cancel(timer); | |
387 | debug_object_free(timer, &hrtimer_debug_descr); | |
388 | return 1; | |
389 | default: | |
390 | return 0; | |
391 | } | |
392 | } | |
393 | ||
394 | static struct debug_obj_descr hrtimer_debug_descr = { | |
395 | .name = "hrtimer", | |
396 | .fixup_init = hrtimer_fixup_init, | |
397 | .fixup_activate = hrtimer_fixup_activate, | |
398 | .fixup_free = hrtimer_fixup_free, | |
399 | }; | |
400 | ||
401 | static inline void debug_hrtimer_init(struct hrtimer *timer) | |
402 | { | |
403 | debug_object_init(timer, &hrtimer_debug_descr); | |
404 | } | |
405 | ||
406 | static inline void debug_hrtimer_activate(struct hrtimer *timer) | |
407 | { | |
408 | debug_object_activate(timer, &hrtimer_debug_descr); | |
409 | } | |
410 | ||
411 | static inline void debug_hrtimer_deactivate(struct hrtimer *timer) | |
412 | { | |
413 | debug_object_deactivate(timer, &hrtimer_debug_descr); | |
414 | } | |
415 | ||
416 | static inline void debug_hrtimer_free(struct hrtimer *timer) | |
417 | { | |
418 | debug_object_free(timer, &hrtimer_debug_descr); | |
419 | } | |
420 | ||
421 | static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |
422 | enum hrtimer_mode mode); | |
423 | ||
424 | void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id, | |
425 | enum hrtimer_mode mode) | |
426 | { | |
427 | debug_object_init_on_stack(timer, &hrtimer_debug_descr); | |
428 | __hrtimer_init(timer, clock_id, mode); | |
429 | } | |
430 | EXPORT_SYMBOL_GPL(hrtimer_init_on_stack); | |
431 | ||
432 | void destroy_hrtimer_on_stack(struct hrtimer *timer) | |
433 | { | |
434 | debug_object_free(timer, &hrtimer_debug_descr); | |
435 | } | |
436 | ||
437 | #else | |
438 | static inline void debug_hrtimer_init(struct hrtimer *timer) { } | |
439 | static inline void debug_hrtimer_activate(struct hrtimer *timer) { } | |
440 | static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } | |
441 | #endif | |
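/*
 * Illustrative sketch (not part of the original file): the expected pairing
 * for an on-stack hrtimer when CONFIG_DEBUG_OBJECTS_TIMERS is enabled. The
 * function and variable names are hypothetical.
 */
#if 0
static enum hrtimer_restart my_stack_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_wait(void)
{
	struct hrtimer timer;

	hrtimer_init_on_stack(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer.function = my_stack_timer_fn;
	hrtimer_start(&timer, ktime_set(0, 100 * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
	/* ... wait for the wakeup or give up ... */
	hrtimer_cancel(&timer);
	destroy_hrtimer_on_stack(&timer);
}
#endif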
442 | ||
443 | static inline void | |
444 | debug_init(struct hrtimer *timer, clockid_t clockid, | |
445 | enum hrtimer_mode mode) | |
446 | { | |
447 | debug_hrtimer_init(timer); | |
448 | trace_hrtimer_init(timer, clockid, mode); | |
449 | } | |
450 | ||
451 | static inline void debug_activate(struct hrtimer *timer) | |
452 | { | |
453 | debug_hrtimer_activate(timer); | |
454 | trace_hrtimer_start(timer); | |
455 | } | |
456 | ||
457 | static inline void debug_deactivate(struct hrtimer *timer) | |
458 | { | |
459 | debug_hrtimer_deactivate(timer); | |
460 | trace_hrtimer_cancel(timer); | |
461 | } | |
462 | ||
463 | /* High resolution timer related functions */ | |
464 | #ifdef CONFIG_HIGH_RES_TIMERS | |
465 | ||
466 | /* | |
467 | * High resolution timer enabled ? | |
468 | */ | |
469 | static int hrtimer_hres_enabled __read_mostly = 1; | |
470 | ||
471 | /* | |
472 | * Enable / Disable high resolution mode | |
473 | */ | |
474 | static int __init setup_hrtimer_hres(char *str) | |
475 | { | |
476 | if (!strcmp(str, "off")) | |
477 | hrtimer_hres_enabled = 0; | |
478 | else if (!strcmp(str, "on")) | |
479 | hrtimer_hres_enabled = 1; | |
480 | else | |
481 | return 0; | |
482 | return 1; | |
483 | } | |
484 | ||
485 | __setup("highres=", setup_hrtimer_hres); | |
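/*
 * Usage note (illustrative, not part of the original file): high resolution
 * mode can be switched off from the kernel command line, e.g.
 *
 *	linux ... highres=off
 *
 * Anything other than "off" or "on" makes setup_hrtimer_hres() return 0,
 * i.e. the option is treated as unrecognized.
 */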
486 | ||
487 | /* | |
488 | * hrtimer_is_hres_enabled - query whether the highres mode is enabled | |
489 | */ | |
490 | static inline int hrtimer_is_hres_enabled(void) | |
491 | { | |
492 | return hrtimer_hres_enabled; | |
493 | } | |
494 | ||
495 | /* | |
496 | * Is the high resolution mode active ? | |
497 | */ | |
498 | static inline int hrtimer_hres_active(void) | |
499 | { | |
500 | return __get_cpu_var(hrtimer_bases).hres_active; | |
501 | } | |
502 | ||
503 | /* | |
504 | * Reprogram the event source with checking both queues for the | |
505 | * next event | |
506 | * Called with interrupts disabled and base->lock held | |
507 | */ | |
508 | static void | |
509 | hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) | |
510 | { | |
511 | int i; | |
512 | struct hrtimer_clock_base *base = cpu_base->clock_base; | |
513 | ktime_t expires, expires_next; | |
514 | ||
515 | expires_next.tv64 = KTIME_MAX; | |
516 | ||
517 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | |
518 | struct hrtimer *timer; | |
519 | ||
520 | if (!base->first) | |
521 | continue; | |
522 | timer = rb_entry(base->first, struct hrtimer, node); | |
523 | expires = ktime_sub(hrtimer_get_expires(timer), base->offset); | |
524 | /* | |
525 | * clock_was_set() has changed base->offset so the | |
526 | * result might be negative. Fix it up to prevent a | |
527 | * false positive in clockevents_program_event() | |
528 | */ | |
529 | if (expires.tv64 < 0) | |
530 | expires.tv64 = 0; | |
531 | if (expires.tv64 < expires_next.tv64) | |
532 | expires_next = expires; | |
533 | } | |
534 | ||
535 | if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) | |
536 | return; | |
537 | ||
538 | cpu_base->expires_next.tv64 = expires_next.tv64; | |
539 | ||
540 | if (cpu_base->expires_next.tv64 != KTIME_MAX) | |
541 | tick_program_event(cpu_base->expires_next, 1); | |
542 | } | |
543 | ||
544 | /* | |
545 | * Shared reprogramming for clock_realtime and clock_monotonic | |
546 | * | |
547 | * When a timer is enqueued and expires earlier than the already enqueued | |
548 | * timers, we have to check, whether it expires earlier than the timer for | |
549 | * which the clock event device was armed. | |
550 | * | |
551 | * Called with interrupts disabled and base->cpu_base.lock held | |
552 | */ | |
553 | static int hrtimer_reprogram(struct hrtimer *timer, | |
554 | struct hrtimer_clock_base *base) | |
555 | { | |
556 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | |
557 | ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset); | |
558 | int res; | |
559 | ||
560 | WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0); | |
561 | ||
562 | /* | |
563 | * When the callback is running, we do not reprogram the clock event | |
564 | * device. The timer callback is either running on a different CPU or | |
565 | * the callback is executed in the hrtimer_interrupt context. The | |
566 | * reprogramming is handled either by the softirq, which called the | |
567 | * callback or at the end of the hrtimer_interrupt. | |
568 | */ | |
569 | if (hrtimer_callback_running(timer)) | |
570 | return 0; | |
571 | ||
572 | /* | |
573 | * CLOCK_REALTIME timer might be requested with an absolute | |
574 | * expiry time which is less than base->offset. Nothing wrong | |
575 | * about that, just avoid calling into the tick code, which | |
576 | * now objects to negative expiry values. | |
577 | */ | |
578 | if (expires.tv64 < 0) | |
579 | return -ETIME; | |
580 | ||
581 | if (expires.tv64 >= cpu_base->expires_next.tv64) | |
582 | return 0; | |
583 | ||
584 | /* | |
585 | * If a hang was detected in the last timer interrupt then we | |
586 | * do not schedule a timer which is earlier than the expiry | |
587 | * which we enforced in the hang detection. We want the system | |
588 | * to make progress. | |
589 | */ | |
590 | if (cpu_base->hang_detected) | |
591 | return 0; | |
592 | ||
593 | /* | |
594 | * Clockevents returns -ETIME, when the event was in the past. | |
595 | */ | |
596 | res = tick_program_event(expires, 0); | |
597 | if (!IS_ERR_VALUE(res)) | |
598 | cpu_base->expires_next = expires; | |
599 | return res; | |
600 | } | |
601 | ||
602 | ||
603 | /* | |
604 | * Retrigger next event is called after clock was set | |
605 | * | |
606 | * Called with interrupts disabled via on_each_cpu() | |
607 | */ | |
608 | static void retrigger_next_event(void *arg) | |
609 | { | |
610 | struct hrtimer_cpu_base *base; | |
611 | struct timespec realtime_offset, wtm; | |
612 | unsigned long seq; | |
613 | ||
614 | if (!hrtimer_hres_active()) | |
615 | return; | |
616 | ||
617 | do { | |
618 | seq = read_seqbegin(&xtime_lock); | |
619 | wtm = __get_wall_to_monotonic(); | |
620 | } while (read_seqretry(&xtime_lock, seq)); | |
621 | set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec); | |
622 | ||
623 | base = &__get_cpu_var(hrtimer_bases); | |
624 | ||
625 | /* Adjust CLOCK_REALTIME offset */ | |
626 | raw_spin_lock(&base->lock); | |
627 | base->clock_base[CLOCK_REALTIME].offset = | |
628 | timespec_to_ktime(realtime_offset); | |
629 | ||
630 | hrtimer_force_reprogram(base, 0); | |
631 | raw_spin_unlock(&base->lock); | |
632 | } | |
633 | ||
634 | /* | |
635 | * Clock realtime was set | |
636 | * | |
637 | * Change the offset of the realtime clock vs. the monotonic | |
638 | * clock. | |
639 | * | |
640 | * We might have to reprogram the high resolution timer interrupt. On | |
641 | * SMP we call the architecture specific code to retrigger _all_ high | |
642 | * resolution timer interrupts. On UP we just disable interrupts and | |
643 | * call the high resolution interrupt code. | |
644 | */ | |
645 | void clock_was_set(void) | |
646 | { | |
647 | /* Retrigger the CPU local events everywhere */ | |
648 | on_each_cpu(retrigger_next_event, NULL, 1); | |
649 | } | |
650 | ||
651 | /* | |
652 | * During resume we might have to reprogram the high resolution timer | |
653 | * interrupt (on the local CPU): | |
654 | */ | |
655 | void hres_timers_resume(void) | |
656 | { | |
657 | WARN_ONCE(!irqs_disabled(), | |
658 | KERN_INFO "hres_timers_resume() called with IRQs enabled!"); | |
659 | ||
660 | retrigger_next_event(NULL); | |
661 | } | |
662 | ||
663 | /* | |
664 | * Initialize the high resolution related parts of cpu_base | |
665 | */ | |
666 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) | |
667 | { | |
668 | base->expires_next.tv64 = KTIME_MAX; | |
669 | base->hres_active = 0; | |
670 | } | |
671 | ||
672 | /* | |
673 | * Initialize the high resolution related parts of a hrtimer | |
674 | */ | |
675 | static inline void hrtimer_init_timer_hres(struct hrtimer *timer) | |
676 | { | |
677 | } | |
678 | ||
679 | ||
680 | /* | |
681 | * When High resolution timers are active, try to reprogram. Note, that in case | |
682 | * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry | |
683 | * check happens. The timer gets enqueued into the rbtree. The reprogramming | |
684 | * and expiry check is done in the hrtimer_interrupt or in the softirq. | |
685 | */ | |
686 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |
687 | struct hrtimer_clock_base *base, | |
688 | int wakeup) | |
689 | { | |
690 | if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { | |
691 | if (wakeup) { | |
692 | raw_spin_unlock(&base->cpu_base->lock); | |
693 | raise_softirq_irqoff(HRTIMER_SOFTIRQ); | |
694 | raw_spin_lock(&base->cpu_base->lock); | |
695 | } else | |
696 | __raise_softirq_irqoff(HRTIMER_SOFTIRQ); | |
697 | ||
698 | return 1; | |
699 | } | |
700 | ||
701 | return 0; | |
702 | } | |
703 | ||
704 | /* | |
705 | * Switch to high resolution mode | |
706 | */ | |
707 | static int hrtimer_switch_to_hres(void) | |
708 | { | |
709 | int cpu = smp_processor_id(); | |
710 | struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu); | |
711 | unsigned long flags; | |
712 | ||
713 | if (base->hres_active) | |
714 | return 1; | |
715 | ||
716 | local_irq_save(flags); | |
717 | ||
718 | if (tick_init_highres()) { | |
719 | local_irq_restore(flags); | |
720 | printk(KERN_WARNING "Could not switch to high resolution " | |
721 | "mode on CPU %d\n", cpu); | |
722 | return 0; | |
723 | } | |
724 | base->hres_active = 1; | |
725 | base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES; | |
726 | base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES; | |
727 | ||
728 | tick_setup_sched_timer(); | |
729 | ||
730 | /* "Retrigger" the interrupt to get things going */ | |
731 | retrigger_next_event(NULL); | |
732 | local_irq_restore(flags); | |
733 | return 1; | |
734 | } | |
735 | ||
736 | #else | |
737 | ||
738 | static inline int hrtimer_hres_active(void) { return 0; } | |
739 | static inline int hrtimer_is_hres_enabled(void) { return 0; } | |
740 | static inline int hrtimer_switch_to_hres(void) { return 0; } | |
741 | static inline void | |
742 | hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } | |
743 | static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |
744 | struct hrtimer_clock_base *base, | |
745 | int wakeup) | |
746 | { | |
747 | return 0; | |
748 | } | |
749 | static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } | |
750 | static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { } | |
751 | ||
752 | #endif /* CONFIG_HIGH_RES_TIMERS */ | |
753 | ||
754 | static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) | |
755 | { | |
756 | #ifdef CONFIG_TIMER_STATS | |
757 | if (timer->start_site) | |
758 | return; | |
759 | timer->start_site = __builtin_return_address(0); | |
760 | memcpy(timer->start_comm, current->comm, TASK_COMM_LEN); | |
761 | timer->start_pid = current->pid; | |
762 | #endif | |
763 | } | |
764 | ||
765 | static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer) | |
766 | { | |
767 | #ifdef CONFIG_TIMER_STATS | |
768 | timer->start_site = NULL; | |
769 | #endif | |
770 | } | |
771 | ||
772 | static inline void timer_stats_account_hrtimer(struct hrtimer *timer) | |
773 | { | |
774 | #ifdef CONFIG_TIMER_STATS | |
775 | if (likely(!timer_stats_active)) | |
776 | return; | |
777 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, | |
778 | timer->function, timer->start_comm, 0); | |
779 | #endif | |
780 | } | |
781 | ||
782 | /* | |
783 | * Counterpart to lock_hrtimer_base above: | |
784 | */ | |
785 | static inline | |
786 | void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) | |
787 | { | |
788 | raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); | |
789 | } | |
790 | ||
791 | /** | |
792 | * hrtimer_forward - forward the timer expiry | |
793 | * @timer: hrtimer to forward | |
794 | * @now: forward past this time | |
795 | * @interval: the interval to forward | |
796 | * | |
797 | * Forward the timer expiry so it will expire in the future. | |
798 | * Returns the number of overruns. | |
799 | */ | |
800 | u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) | |
801 | { | |
802 | u64 orun = 1; | |
803 | ktime_t delta; | |
804 | ||
805 | delta = ktime_sub(now, hrtimer_get_expires(timer)); | |
806 | ||
807 | if (delta.tv64 < 0) | |
808 | return 0; | |
809 | ||
810 | if (interval.tv64 < timer->base->resolution.tv64) | |
811 | interval.tv64 = timer->base->resolution.tv64; | |
812 | ||
813 | if (unlikely(delta.tv64 >= interval.tv64)) { | |
814 | s64 incr = ktime_to_ns(interval); | |
815 | ||
816 | orun = ktime_divns(delta, incr); | |
817 | hrtimer_add_expires_ns(timer, incr * orun); | |
818 | if (hrtimer_get_expires_tv64(timer) > now.tv64) | |
819 | return orun; | |
820 | /* | |
821 | * This (and the ktime_add() below) is the | |
822 | * correction for exact: | |
823 | */ | |
824 | orun++; | |
825 | } | |
826 | hrtimer_add_expires(timer, interval); | |
827 | ||
828 | return orun; | |
829 | } | |
830 | EXPORT_SYMBOL_GPL(hrtimer_forward); | |
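/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for a periodic timer. The callback forwards its own expiry by one period
 * and asks to be restarted; hrtimer_forward() returns the number of
 * intervals it had to add, so anything above 1 means periods were skipped.
 * Names are hypothetical and a CLOCK_MONOTONIC timer is assumed, hence the
 * use of ktime_get() as "now".
 */
#if 0
static ktime_t my_period;	/* e.g. ktime_set(0, 10 * NSEC_PER_MSEC) */

static enum hrtimer_restart my_periodic_fn(struct hrtimer *timer)
{
	u64 orun = hrtimer_forward(timer, ktime_get(), my_period);

	if (orun > 1)
		pr_debug("my_periodic_fn: %llu period(s) skipped\n",
			 (unsigned long long)(orun - 1));
	return HRTIMER_RESTART;
}
#endif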
831 | ||
832 | /* | |
833 | * enqueue_hrtimer - internal function to (re)start a timer | |
834 | * | |
835 | * The timer is inserted in expiry order. Insertion into the | |
836 | * red black tree is O(log(n)). Must hold the base lock. | |
837 | * | |
838 | * Returns 1 when the new timer is the leftmost timer in the tree. | |
839 | */ | |
840 | static int enqueue_hrtimer(struct hrtimer *timer, | |
841 | struct hrtimer_clock_base *base) | |
842 | { | |
843 | struct rb_node **link = &base->active.rb_node; | |
844 | struct rb_node *parent = NULL; | |
845 | struct hrtimer *entry; | |
846 | int leftmost = 1; | |
847 | ||
848 | debug_activate(timer); | |
849 | ||
850 | /* | |
851 | * Find the right place in the rbtree: | |
852 | */ | |
853 | while (*link) { | |
854 | parent = *link; | |
855 | entry = rb_entry(parent, struct hrtimer, node); | |
856 | /* | |
857 | * We don't care about collisions. Nodes with | |
858 | * the same expiry time stay together. | |
859 | */ | |
860 | if (hrtimer_get_expires_tv64(timer) < | |
861 | hrtimer_get_expires_tv64(entry)) { | |
862 | link = &(*link)->rb_left; | |
863 | } else { | |
864 | link = &(*link)->rb_right; | |
865 | leftmost = 0; | |
866 | } | |
867 | } | |
868 | ||
869 | /* | |
870 | * Insert the timer to the rbtree and check whether it | |
871 | * replaces the first pending timer | |
872 | */ | |
873 | if (leftmost) | |
874 | base->first = &timer->node; | |
875 | ||
876 | rb_link_node(&timer->node, parent, link); | |
877 | rb_insert_color(&timer->node, &base->active); | |
878 | /* | |
879 | * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the | |
880 | * state of a possibly running callback. | |
881 | */ | |
882 | timer->state |= HRTIMER_STATE_ENQUEUED; | |
883 | ||
884 | return leftmost; | |
885 | } | |
886 | ||
887 | /* | |
888 | * __remove_hrtimer - internal function to remove a timer | |
889 | * | |
890 | * Caller must hold the base lock. | |
891 | * | |
892 | * High resolution timer mode reprograms the clock event device when the | |
893 | * timer is the one which expires next. The caller can disable this by setting | |
894 | * reprogram to zero. This is useful, when the context does a reprogramming | |
895 | * anyway (e.g. timer interrupt) | |
896 | */ | |
897 | static void __remove_hrtimer(struct hrtimer *timer, | |
898 | struct hrtimer_clock_base *base, | |
899 | unsigned long newstate, int reprogram) | |
900 | { | |
901 | if (!(timer->state & HRTIMER_STATE_ENQUEUED)) | |
902 | goto out; | |
903 | ||
904 | /* | |
905 | * Remove the timer from the rbtree and replace the first | |
906 | * entry pointer if necessary. | |
907 | */ | |
908 | if (base->first == &timer->node) { | |
909 | base->first = rb_next(&timer->node); | |
910 | #ifdef CONFIG_HIGH_RES_TIMERS | |
911 | /* Reprogram the clock event device, if enabled */ | |
912 | if (reprogram && hrtimer_hres_active()) { | |
913 | ktime_t expires; | |
914 | ||
915 | expires = ktime_sub(hrtimer_get_expires(timer), | |
916 | base->offset); | |
917 | if (base->cpu_base->expires_next.tv64 == expires.tv64) | |
918 | hrtimer_force_reprogram(base->cpu_base, 1); | |
919 | } | |
920 | #endif | |
921 | } | |
922 | rb_erase(&timer->node, &base->active); | |
923 | out: | |
924 | timer->state = newstate; | |
925 | } | |
926 | ||
927 | /* | |
928 | * remove hrtimer, called with base lock held | |
929 | */ | |
930 | static inline int | |
931 | remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) | |
932 | { | |
933 | if (hrtimer_is_queued(timer)) { | |
934 | unsigned long state; | |
935 | int reprogram; | |
936 | ||
937 | /* | |
938 | * Remove the timer and force reprogramming when high | |
939 | * resolution mode is active and the timer is on the current | |
940 | * CPU. If we remove a timer on another CPU, reprogramming is | |
941 | * skipped. The interrupt event on this CPU is fired and | |
942 | * reprogramming happens in the interrupt handler. This is a | |
943 | * rare case and less expensive than a smp call. | |
944 | */ | |
945 | debug_deactivate(timer); | |
946 | timer_stats_hrtimer_clear_start_info(timer); | |
947 | reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); | |
948 | /* | |
949 | * We must preserve the CALLBACK state flag here, | |
950 | * otherwise we could move the timer base in | |
951 | * switch_hrtimer_base. | |
952 | */ | |
953 | state = timer->state & HRTIMER_STATE_CALLBACK; | |
954 | __remove_hrtimer(timer, base, state, reprogram); | |
955 | return 1; | |
956 | } | |
957 | return 0; | |
958 | } | |
959 | ||
960 | int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |
961 | unsigned long delta_ns, const enum hrtimer_mode mode, | |
962 | int wakeup) | |
963 | { | |
964 | struct hrtimer_clock_base *base, *new_base; | |
965 | unsigned long flags; | |
966 | int ret, leftmost; | |
967 | ||
968 | base = lock_hrtimer_base(timer, &flags); | |
969 | ||
970 | /* Remove an active timer from the queue: */ | |
971 | ret = remove_hrtimer(timer, base); | |
972 | ||
973 | /* Switch the timer base, if necessary: */ | |
974 | new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); | |
975 | ||
976 | if (mode & HRTIMER_MODE_REL) { | |
977 | tim = ktime_add_safe(tim, new_base->get_time()); | |
978 | /* | |
979 | * CONFIG_TIME_LOW_RES is a temporary way for architectures | |
980 | * to signal that they simply return xtime in | |
981 | * do_gettimeoffset(). In this case we want to round up by | |
982 | * resolution when starting a relative timer, to avoid short | |
983 | * timeouts. This will go away with the GTOD framework. | |
984 | */ | |
985 | #ifdef CONFIG_TIME_LOW_RES | |
986 | tim = ktime_add_safe(tim, base->resolution); | |
987 | #endif | |
988 | } | |
989 | ||
990 | hrtimer_set_expires_range_ns(timer, tim, delta_ns); | |
991 | ||
992 | timer_stats_hrtimer_set_start_info(timer); | |
993 | ||
994 | leftmost = enqueue_hrtimer(timer, new_base); | |
995 | ||
996 | /* | |
997 | * Only allow reprogramming if the new base is on this CPU. | |
998 | * (it might still be on another CPU if the timer was pending) | |
999 | * | |
1000 | * XXX send_remote_softirq() ? | |
1001 | */ | |
1002 | if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) | |
1003 | hrtimer_enqueue_reprogram(timer, new_base, wakeup); | |
1004 | ||
1005 | unlock_hrtimer_base(timer, &flags); | |
1006 | ||
1007 | return ret; | |
1008 | } | |
1009 | ||
1010 | /** | |
1011 | * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU | |
1012 | * @timer: the timer to be added | |
1013 | * @tim: expiry time | |
1014 | * @delta_ns: "slack" range for the timer | |
1015 | * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL) | |
1016 | * | |
1017 | * Returns: | |
1018 | * 0 on success | |
1019 | * 1 when the timer was active | |
1020 | */ | |
1021 | int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |
1022 | unsigned long delta_ns, const enum hrtimer_mode mode) | |
1023 | { | |
1024 | return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1); | |
1025 | } | |
1026 | EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); | |
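/*
 * Illustrative sketch (not part of the original file): starting a timer
 * with a 100 us slack window. The timer may expire anywhere between tim and
 * tim + delta_ns, which lets the core coalesce it with nearby timers. The
 * timer variable is assumed to be initialized elsewhere.
 */
#if 0
	hrtimer_start_range_ns(&my_timer, ktime_set(0, 5 * NSEC_PER_MSEC),
			       100 * NSEC_PER_USEC, HRTIMER_MODE_REL);
#endif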
1027 | ||
1028 | /** | |
1029 | * hrtimer_start - (re)start an hrtimer on the current CPU | |
1030 | * @timer: the timer to be added | |
1031 | * @tim: expiry time | |
1032 | * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL) | |
1033 | * | |
1034 | * Returns: | |
1035 | * 0 on success | |
1036 | * 1 when the timer was active | |
1037 | */ | |
1038 | int | |
1039 | hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) | |
1040 | { | |
1041 | return __hrtimer_start_range_ns(timer, tim, 0, mode, 1); | |
1042 | } | |
1043 | EXPORT_SYMBOL_GPL(hrtimer_start); | |
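/*
 * Illustrative sketch (not part of the original file): minimal setup of a
 * one-shot relative timer. The callback and variable names are
 * hypothetical.
 */
#if 0
static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
	/* runs with interrupts disabled; keep it short */
	return HRTIMER_NORESTART;
}

static void my_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	hrtimer_start(&my_timer, ktime_set(1, 0), HRTIMER_MODE_REL);
}
#endif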
1044 | ||
1045 | ||
1046 | /** | |
1047 | * hrtimer_try_to_cancel - try to deactivate a timer | |
1048 | * @timer: hrtimer to stop | |
1049 | * | |
1050 | * Returns: | |
1051 | * 0 when the timer was not active | |
1052 | * 1 when the timer was active | |
1053 | * -1 when the timer is currently executing the callback function and | |
1054 | * cannot be stopped | |
1055 | */ | |
1056 | int hrtimer_try_to_cancel(struct hrtimer *timer) | |
1057 | { | |
1058 | struct hrtimer_clock_base *base; | |
1059 | unsigned long flags; | |
1060 | int ret = -1; | |
1061 | ||
1062 | base = lock_hrtimer_base(timer, &flags); | |
1063 | ||
1064 | if (!hrtimer_callback_running(timer)) | |
1065 | ret = remove_hrtimer(timer, base); | |
1066 | ||
1067 | unlock_hrtimer_base(timer, &flags); | |
1068 | ||
1069 | return ret; | |
1070 | ||
1071 | } | |
1072 | EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel); | |
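/*
 * Illustrative sketch (not part of the original file):
 * hrtimer_try_to_cancel() is the variant to use where busy-waiting on a
 * running callback is not acceptable; -1 means the callback is executing
 * right now. hrtimer_cancel() below simply retries until it succeeds, so it
 * must never be called on a timer from that timer's own callback.
 */
#if 0
	if (hrtimer_try_to_cancel(&my_timer) < 0) {
		/* callback is running; retry later or let it finish */
	}
#endif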
1073 | ||
1074 | /** | |
1075 | * hrtimer_cancel - cancel a timer and wait for the handler to finish. | |
1076 | * @timer: the timer to be cancelled | |
1077 | * | |
1078 | * Returns: | |
1079 | * 0 when the timer was not active | |
1080 | * 1 when the timer was active | |
1081 | */ | |
1082 | int hrtimer_cancel(struct hrtimer *timer) | |
1083 | { | |
1084 | for (;;) { | |
1085 | int ret = hrtimer_try_to_cancel(timer); | |
1086 | ||
1087 | if (ret >= 0) | |
1088 | return ret; | |
1089 | cpu_relax(); | |
1090 | } | |
1091 | } | |
1092 | EXPORT_SYMBOL_GPL(hrtimer_cancel); | |
1093 | ||
1094 | /** | |
1095 | * hrtimer_get_remaining - get remaining time for the timer | |
1096 | * @timer: the timer to read | |
1097 | */ | |
1098 | ktime_t hrtimer_get_remaining(const struct hrtimer *timer) | |
1099 | { | |
1100 | unsigned long flags; | |
1101 | ktime_t rem; | |
1102 | ||
1103 | lock_hrtimer_base(timer, &flags); | |
1104 | rem = hrtimer_expires_remaining(timer); | |
1105 | unlock_hrtimer_base(timer, &flags); | |
1106 | ||
1107 | return rem; | |
1108 | } | |
1109 | EXPORT_SYMBOL_GPL(hrtimer_get_remaining); | |
1110 | ||
1111 | #ifdef CONFIG_NO_HZ | |
1112 | /** | |
1113 | * hrtimer_get_next_event - get the time until next expiry event | |
1114 | * | |
1115 | * Returns the delta to the next expiry event or KTIME_MAX if no timer | |
1116 | * is pending. | |
1117 | */ | |
1118 | ktime_t hrtimer_get_next_event(void) | |
1119 | { | |
1120 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | |
1121 | struct hrtimer_clock_base *base = cpu_base->clock_base; | |
1122 | ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; | |
1123 | unsigned long flags; | |
1124 | int i; | |
1125 | ||
1126 | raw_spin_lock_irqsave(&cpu_base->lock, flags); | |
1127 | ||
1128 | if (!hrtimer_hres_active()) { | |
1129 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | |
1130 | struct hrtimer *timer; | |
1131 | ||
1132 | if (!base->first) | |
1133 | continue; | |
1134 | ||
1135 | timer = rb_entry(base->first, struct hrtimer, node); | |
1136 | delta.tv64 = hrtimer_get_expires_tv64(timer); | |
1137 | delta = ktime_sub(delta, base->get_time()); | |
1138 | if (delta.tv64 < mindelta.tv64) | |
1139 | mindelta.tv64 = delta.tv64; | |
1140 | } | |
1141 | } | |
1142 | ||
1143 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | |
1144 | ||
1145 | if (mindelta.tv64 < 0) | |
1146 | mindelta.tv64 = 0; | |
1147 | return mindelta; | |
1148 | } | |
1149 | #endif | |
1150 | ||
1151 | static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |
1152 | enum hrtimer_mode mode) | |
1153 | { | |
1154 | struct hrtimer_cpu_base *cpu_base; | |
1155 | ||
1156 | memset(timer, 0, sizeof(struct hrtimer)); | |
1157 | ||
1158 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); | |
1159 | ||
1160 | if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS) | |
1161 | clock_id = CLOCK_MONOTONIC; | |
1162 | ||
1163 | timer->base = &cpu_base->clock_base[clock_id]; | |
1164 | hrtimer_init_timer_hres(timer); | |
1165 | ||
1166 | #ifdef CONFIG_TIMER_STATS | |
1167 | timer->start_site = NULL; | |
1168 | timer->start_pid = -1; | |
1169 | memset(timer->start_comm, 0, TASK_COMM_LEN); | |
1170 | #endif | |
1171 | } | |
1172 | ||
1173 | /** | |
1174 | * hrtimer_init - initialize a timer to the given clock | |
1175 | * @timer: the timer to be initialized | |
1176 | * @clock_id: the clock to be used | |
1177 | * @mode: timer mode abs/rel | |
1178 | */ | |
1179 | void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |
1180 | enum hrtimer_mode mode) | |
1181 | { | |
1182 | debug_init(timer, clock_id, mode); | |
1183 | __hrtimer_init(timer, clock_id, mode); | |
1184 | } | |
1185 | EXPORT_SYMBOL_GPL(hrtimer_init); | |
1186 | ||
1187 | /** | |
1188 | * hrtimer_get_res - get the timer resolution for a clock | |
1189 | * @which_clock: which clock to query | |
1190 | * @tp: pointer to timespec variable to store the resolution | |
1191 | * | |
1192 | * Store the resolution of the clock selected by @which_clock in the | |
1193 | * variable pointed to by @tp. | |
1194 | */ | |
1195 | int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) | |
1196 | { | |
1197 | struct hrtimer_cpu_base *cpu_base; | |
1198 | ||
1199 | cpu_base = &__raw_get_cpu_var(hrtimer_bases); | |
1200 | *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution); | |
1201 | ||
1202 | return 0; | |
1203 | } | |
1204 | EXPORT_SYMBOL_GPL(hrtimer_get_res); | |
1205 | ||
1206 | static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) | |
1207 | { | |
1208 | struct hrtimer_clock_base *base = timer->base; | |
1209 | struct hrtimer_cpu_base *cpu_base = base->cpu_base; | |
1210 | enum hrtimer_restart (*fn)(struct hrtimer *); | |
1211 | int restart; | |
1212 | ||
1213 | WARN_ON(!irqs_disabled()); | |
1214 | ||
1215 | debug_deactivate(timer); | |
1216 | __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); | |
1217 | timer_stats_account_hrtimer(timer); | |
1218 | fn = timer->function; | |
1219 | ||
1220 | /* | |
1221 | * Because we run timers from hardirq context, there is no chance | |
1222 | * they get migrated to another CPU, therefore it's safe to unlock | |
1223 | * the timer base. | |
1224 | */ | |
1225 | raw_spin_unlock(&cpu_base->lock); | |
1226 | trace_hrtimer_expire_entry(timer, now); | |
1227 | restart = fn(timer); | |
1228 | trace_hrtimer_expire_exit(timer); | |
1229 | raw_spin_lock(&cpu_base->lock); | |
1230 | ||
1231 | /* | |
1232 | * Note: We clear the CALLBACK bit after enqueue_hrtimer and | |
1233 | * we do not reprogram the event hardware. Happens either in | |
1234 | * hrtimer_start_range_ns() or in hrtimer_interrupt() | |
1235 | */ | |
1236 | if (restart != HRTIMER_NORESTART) { | |
1237 | BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); | |
1238 | enqueue_hrtimer(timer, base); | |
1239 | } | |
1240 | ||
1241 | WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK)); | |
1242 | ||
1243 | timer->state &= ~HRTIMER_STATE_CALLBACK; | |
1244 | } | |
1245 | ||
1246 | #ifdef CONFIG_HIGH_RES_TIMERS | |
1247 | ||
1248 | /* | |
1249 | * High resolution timer interrupt | |
1250 | * Called with interrupts disabled | |
1251 | */ | |
1252 | void hrtimer_interrupt(struct clock_event_device *dev) | |
1253 | { | |
1254 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | |
1255 | struct hrtimer_clock_base *base; | |
1256 | ktime_t expires_next, now, entry_time, delta; | |
1257 | int i, retries = 0; | |
1258 | ||
1259 | BUG_ON(!cpu_base->hres_active); | |
1260 | cpu_base->nr_events++; | |
1261 | dev->next_event.tv64 = KTIME_MAX; | |
1262 | ||
1263 | entry_time = now = ktime_get(); | |
1264 | retry: | |
1265 | expires_next.tv64 = KTIME_MAX; | |
1266 | ||
1267 | raw_spin_lock(&cpu_base->lock); | |
1268 | /* | |
1269 | * We set expires_next to KTIME_MAX here with cpu_base->lock | |
1270 | * held to prevent a timer from being enqueued in our queue via | |
1271 | * the migration code. This does not affect enqueueing of | |
1272 | * timers which run their callback and need to be requeued on | |
1273 | * this CPU. | |
1274 | */ | |
1275 | cpu_base->expires_next.tv64 = KTIME_MAX; | |
1276 | ||
1277 | base = cpu_base->clock_base; | |
1278 | ||
1279 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | |
1280 | ktime_t basenow; | |
1281 | struct rb_node *node; | |
1282 | ||
1283 | basenow = ktime_add(now, base->offset); | |
1284 | ||
1285 | while ((node = base->first)) { | |
1286 | struct hrtimer *timer; | |
1287 | ||
1288 | timer = rb_entry(node, struct hrtimer, node); | |
1289 | ||
1290 | /* | |
1291 | * The immediate goal for using the softexpires is | |
1292 | * minimizing wakeups, not running timers at the | |
1293 | * earliest interrupt after their soft expiration. | |
1294 | * This allows us to avoid using a Priority Search | |
1295 | * Tree, which can answer a stabbing query for | |
1296 | * overlapping intervals and instead use the simple | |
1297 | * BST we already have. | |
1298 | * We don't add extra wakeups by delaying timers that | |
1299 | * are to the right of a not-yet-expired timer, because that | |
1300 | * timer will have to trigger a wakeup anyway. | |
1301 | */ | |
1302 | ||
1303 | if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) { | |
1304 | ktime_t expires; | |
1305 | ||
1306 | expires = ktime_sub(hrtimer_get_expires(timer), | |
1307 | base->offset); | |
1308 | if (expires.tv64 < expires_next.tv64) | |
1309 | expires_next = expires; | |
1310 | break; | |
1311 | } | |
1312 | ||
1313 | __run_hrtimer(timer, &basenow); | |
1314 | } | |
1315 | base++; | |
1316 | } | |
1317 | ||
1318 | /* | |
1319 | * Store the new expiry value so the migration code can verify | |
1320 | * against it. | |
1321 | */ | |
1322 | cpu_base->expires_next = expires_next; | |
1323 | raw_spin_unlock(&cpu_base->lock); | |
1324 | ||
1325 | /* Reprogramming necessary ? */ | |
1326 | if (expires_next.tv64 == KTIME_MAX || | |
1327 | !tick_program_event(expires_next, 0)) { | |
1328 | cpu_base->hang_detected = 0; | |
1329 | return; | |
1330 | } | |
1331 | ||
1332 | /* | |
1333 | * The next timer was already expired due to: | |
1334 | * - tracing | |
1335 | * - long lasting callbacks | |
1336 | * - being scheduled away when running in a VM | |
1337 | * | |
1338 | * We need to prevent that we loop forever in the hrtimer | |
1339 | * interrupt routine. We give it 3 attempts to avoid | |
1340 | * overreacting on some spurious event. | |
1341 | */ | |
1342 | now = ktime_get(); | |
1343 | cpu_base->nr_retries++; | |
1344 | if (++retries < 3) | |
1345 | goto retry; | |
1346 | /* | |
1347 | * Give the system a chance to do something else than looping | |
1348 | * here. We stored the entry time, so we know exactly how long | |
1349 | * we spent here. We schedule the next event this amount of | |
1350 | * time away. | |
1351 | */ | |
1352 | cpu_base->nr_hangs++; | |
1353 | cpu_base->hang_detected = 1; | |
1354 | delta = ktime_sub(now, entry_time); | |
1355 | if (delta.tv64 > cpu_base->max_hang_time.tv64) | |
1356 | cpu_base->max_hang_time = delta; | |
1357 | /* | |
1358 | * Limit it to a sensible value as we enforce a longer | |
1359 | * delay. Give the CPU at least 100ms to catch up. | |
1360 | */ | |
1361 | if (delta.tv64 > 100 * NSEC_PER_MSEC) | |
1362 | expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC); | |
1363 | else | |
1364 | expires_next = ktime_add(now, delta); | |
1365 | tick_program_event(expires_next, 1); | |
1366 | printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n", | |
1367 | ktime_to_ns(delta)); | |
1368 | } | |
1369 | ||
1370 | /* | |
1371 | * local version of hrtimer_peek_ahead_timers() called with interrupts | |
1372 | * disabled. | |
1373 | */ | |
1374 | static void __hrtimer_peek_ahead_timers(void) | |
1375 | { | |
1376 | struct tick_device *td; | |
1377 | ||
1378 | if (!hrtimer_hres_active()) | |
1379 | return; | |
1380 | ||
1381 | td = &__get_cpu_var(tick_cpu_device); | |
1382 | if (td && td->evtdev) | |
1383 | hrtimer_interrupt(td->evtdev); | |
1384 | } | |
1385 | ||
1386 | /** | |
1387 | * hrtimer_peek_ahead_timers -- run soft-expired timers now | |
1388 | * | |
1389 | * hrtimer_peek_ahead_timers will peek at the timer queue of | |
1390 | * the current cpu and check if there are any timers for which | |
1391 | * the soft expires time has passed. If any such timers exist, | |
1392 | * they are run immediately and then removed from the timer queue. | |
1393 | * | |
1394 | */ | |
1395 | void hrtimer_peek_ahead_timers(void) | |
1396 | { | |
1397 | unsigned long flags; | |
1398 | ||
1399 | local_irq_save(flags); | |
1400 | __hrtimer_peek_ahead_timers(); | |
1401 | local_irq_restore(flags); | |
1402 | } | |
1403 | ||
1404 | static void run_hrtimer_softirq(struct softirq_action *h) | |
1405 | { | |
1406 | hrtimer_peek_ahead_timers(); | |
1407 | } | |
1408 | ||
1409 | #else /* CONFIG_HIGH_RES_TIMERS */ | |
1410 | ||
1411 | static inline void __hrtimer_peek_ahead_timers(void) { } | |
1412 | ||
1413 | #endif /* !CONFIG_HIGH_RES_TIMERS */ | |
1414 | ||
1415 | /* | |
1416 | * Called from timer softirq every jiffy, expire hrtimers: | |
1417 | * | |
1418 | * For HRT it is the fallback code to run the softirq in the timer | |
1419 | * softirq context in case the hrtimer initialization failed or has | |
1420 | * not been done yet. | |
1421 | */ | |
1422 | void hrtimer_run_pending(void) | |
1423 | { | |
1424 | if (hrtimer_hres_active()) | |
1425 | return; | |
1426 | ||
1427 | /* | |
1428 | * This _is_ ugly: We have to check in the softirq context, | |
1429 | * whether we can switch to highres and / or nohz mode. The | |
1430 | * clocksource switch happens in the timer interrupt with | |
1431 | * xtime_lock held. Notification from there only sets the | |
1432 | * check bit in the tick_oneshot code, otherwise we might | |
1433 | * deadlock vs. xtime_lock. | |
1434 | */ | |
1435 | if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) | |
1436 | hrtimer_switch_to_hres(); | |
1437 | } | |
1438 | ||
1439 | /* | |
1440 | * Called from hardirq context every jiffy | |
1441 | */ | |
1442 | void hrtimer_run_queues(void) | |
1443 | { | |
1444 | struct rb_node *node; | |
1445 | struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); | |
1446 | struct hrtimer_clock_base *base; | |
1447 | int index, gettime = 1; | |
1448 | ||
1449 | if (hrtimer_hres_active()) | |
1450 | return; | |
1451 | ||
1452 | for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { | |
1453 | base = &cpu_base->clock_base[index]; | |
1454 | ||
1455 | if (!base->first) | |
1456 | continue; | |
1457 | ||
1458 | if (gettime) { | |
1459 | hrtimer_get_softirq_time(cpu_base); | |
1460 | gettime = 0; | |
1461 | } | |
1462 | ||
1463 | raw_spin_lock(&cpu_base->lock); | |
1464 | ||
1465 | while ((node = base->first)) { | |
1466 | struct hrtimer *timer; | |
1467 | ||
1468 | timer = rb_entry(node, struct hrtimer, node); | |
1469 | if (base->softirq_time.tv64 <= | |
1470 | hrtimer_get_expires_tv64(timer)) | |
1471 | break; | |
1472 | ||
1473 | __run_hrtimer(timer, &base->softirq_time); | |
1474 | } | |
1475 | raw_spin_unlock(&cpu_base->lock); | |
1476 | } | |
1477 | } | |
1478 | ||
1479 | /* | |
1480 | * Sleep related functions: | |
1481 | */ | |
1482 | static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) | |
1483 | { | |
1484 | struct hrtimer_sleeper *t = | |
1485 | container_of(timer, struct hrtimer_sleeper, timer); | |
1486 | struct task_struct *task = t->task; | |
1487 | ||
1488 | t->task = NULL; | |
1489 | if (task) | |
1490 | wake_up_process(task); | |
1491 | ||
1492 | return HRTIMER_NORESTART; | |
1493 | } | |
1494 | ||
1495 | void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) | |
1496 | { | |
1497 | sl->timer.function = hrtimer_wakeup; | |
1498 | sl->task = task; | |
1499 | } | |
1500 | EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); | |
1501 | ||
1502 | static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) | |
1503 | { | |
1504 | hrtimer_init_sleeper(t, current); | |
1505 | ||
1506 | do { | |
1507 | set_current_state(TASK_INTERRUPTIBLE); | |
1508 | hrtimer_start_expires(&t->timer, mode); | |
1509 | if (!hrtimer_active(&t->timer)) | |
1510 | t->task = NULL; | |
1511 | ||
1512 | if (likely(t->task)) | |
1513 | schedule(); | |
1514 | ||
1515 | hrtimer_cancel(&t->timer); | |
1516 | mode = HRTIMER_MODE_ABS; | |
1517 | ||
1518 | } while (t->task && !signal_pending(current)); | |
1519 | ||
1520 | __set_current_state(TASK_RUNNING); | |
1521 | ||
1522 | return t->task == NULL; | |
1523 | } | |
1524 | ||
1525 | static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp) | |
1526 | { | |
1527 | struct timespec rmt; | |
1528 | ktime_t rem; | |
1529 | ||
1530 | rem = hrtimer_expires_remaining(timer); | |
1531 | if (rem.tv64 <= 0) | |
1532 | return 0; | |
1533 | rmt = ktime_to_timespec(rem); | |
1534 | ||
1535 | if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) | |
1536 | return -EFAULT; | |
1537 | ||
1538 | return 1; | |
1539 | } | |
1540 | ||
1541 | long __sched hrtimer_nanosleep_restart(struct restart_block *restart) | |
1542 | { | |
1543 | struct hrtimer_sleeper t; | |
1544 | struct timespec __user *rmtp; | |
1545 | int ret = 0; | |
1546 | ||
1547 | hrtimer_init_on_stack(&t.timer, restart->nanosleep.index, | |
1548 | HRTIMER_MODE_ABS); | |
1549 | hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); | |
1550 | ||
1551 | if (do_nanosleep(&t, HRTIMER_MODE_ABS)) | |
1552 | goto out; | |
1553 | ||
1554 | rmtp = restart->nanosleep.rmtp; | |
1555 | if (rmtp) { | |
1556 | ret = update_rmtp(&t.timer, rmtp); | |
1557 | if (ret <= 0) | |
1558 | goto out; | |
1559 | } | |
1560 | ||
1561 | /* The other values in restart are already filled in */ | |
1562 | ret = -ERESTART_RESTARTBLOCK; | |
1563 | out: | |
1564 | destroy_hrtimer_on_stack(&t.timer); | |
1565 | return ret; | |
1566 | } | |
1567 | ||
1568 | long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, | |
1569 | const enum hrtimer_mode mode, const clockid_t clockid) | |
1570 | { | |
1571 | struct restart_block *restart; | |
1572 | struct hrtimer_sleeper t; | |
1573 | int ret = 0; | |
1574 | unsigned long slack; | |
1575 | ||
1576 | slack = current->timer_slack_ns; | |
1577 | if (rt_task(current)) | |
1578 | slack = 0; | |
1579 | ||
1580 | hrtimer_init_on_stack(&t.timer, clockid, mode); | |
1581 | hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); | |
1582 | if (do_nanosleep(&t, mode)) | |
1583 | goto out; | |
1584 | ||
1585 | /* Absolute timers do not update the rmtp value and restart: */ | |
1586 | if (mode == HRTIMER_MODE_ABS) { | |
1587 | ret = -ERESTARTNOHAND; | |
1588 | goto out; | |
1589 | } | |
1590 | ||
1591 | if (rmtp) { | |
1592 | ret = update_rmtp(&t.timer, rmtp); | |
1593 | if (ret <= 0) | |
1594 | goto out; | |
1595 | } | |
1596 | ||
1597 | restart = &current_thread_info()->restart_block; | |
1598 | restart->fn = hrtimer_nanosleep_restart; | |
1599 | restart->nanosleep.index = t.timer.base->index; | |
1600 | restart->nanosleep.rmtp = rmtp; | |
1601 | restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer); | |
1602 | ||
1603 | ret = -ERESTART_RESTARTBLOCK; | |
1604 | out: | |
1605 | destroy_hrtimer_on_stack(&t.timer); | |
1606 | return ret; | |
1607 | } | |
1608 | ||
1609 | SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, | |
1610 | struct timespec __user *, rmtp) | |
1611 | { | |
1612 | struct timespec tu; | |
1613 | ||
1614 | if (copy_from_user(&tu, rqtp, sizeof(tu))) | |
1615 | return -EFAULT; | |
1616 | ||
1617 | if (!timespec_valid(&tu)) | |
1618 | return -EINVAL; | |
1619 | ||
1620 | return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); | |
1621 | } | |
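/*
 * Illustrative sketch (not part of the original file): the user space view
 * of the syscall above. When a signal interrupts the sleep, nanosleep()
 * fails with EINTR and update_rmtp() has stored the remaining time, so the
 * sleep can be resumed. This is user space code (needs <time.h> and
 * <errno.h>), shown only to document the contract.
 */
#if 0
	struct timespec req = { .tv_sec = 1, .tv_nsec = 0 }, rem;

	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
		req = rem;	/* continue with whatever time is left */
#endif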
1622 | ||
1623 | /* | |
1624 | * Functions related to boot-time initialization: | |
1625 | */ | |
1626 | static void __cpuinit init_hrtimers_cpu(int cpu) | |
1627 | { | |
1628 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | |
1629 | int i; | |
1630 | ||
1631 | raw_spin_lock_init(&cpu_base->lock); | |
1632 | ||
1633 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | |
1634 | cpu_base->clock_base[i].cpu_base = cpu_base; | |
1635 | ||
1636 | hrtimer_init_hres(cpu_base); | |
1637 | } | |
1638 | ||
1639 | #ifdef CONFIG_HOTPLUG_CPU | |
1640 | ||
1641 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | |
1642 | struct hrtimer_clock_base *new_base) | |
1643 | { | |
1644 | struct hrtimer *timer; | |
1645 | struct rb_node *node; | |
1646 | ||
1647 | while ((node = rb_first(&old_base->active))) { | |
1648 | timer = rb_entry(node, struct hrtimer, node); | |
1649 | BUG_ON(hrtimer_callback_running(timer)); | |
1650 | debug_deactivate(timer); | |
1651 | ||
1652 | /* | |
1653 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the | |
1654 | * timer could be seen as !active and just vanish away | |
1655 | * under us on another CPU | |
1656 | */ | |
1657 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); | |
1658 | timer->base = new_base; | |
1659 | /* | |
1660 | * Enqueue the timers on the new cpu. This does not | |
1661 | * reprogram the event device in case the timer | |
1662 | * expires before the earliest on this CPU, but we run | |
1663 | * hrtimer_interrupt after we migrated everything to | |
1664 | * sort out already expired timers and reprogram the | |
1665 | * event device. | |
1666 | */ | |
1667 | enqueue_hrtimer(timer, new_base); | |
1668 | ||
1669 | /* Clear the migration state bit */ | |
1670 | timer->state &= ~HRTIMER_STATE_MIGRATE; | |
1671 | } | |
1672 | } | |
1673 | ||
1674 | static void migrate_hrtimers(int scpu) | |
1675 | { | |
1676 | struct hrtimer_cpu_base *old_base, *new_base; | |
1677 | int i; | |
1678 | ||
1679 | BUG_ON(cpu_online(scpu)); | |
1680 | tick_cancel_sched_timer(scpu); | |
1681 | ||
1682 | local_irq_disable(); | |
1683 | old_base = &per_cpu(hrtimer_bases, scpu); | |
1684 | new_base = &__get_cpu_var(hrtimer_bases); | |
1685 | /* | |
1686 | * The caller is globally serialized and nobody else | |
1687 | * takes two locks at once, so deadlock is not possible. | |
1688 | */ | |
1689 | raw_spin_lock(&new_base->lock); | |
1690 | raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | |
1691 | ||
1692 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | |
1693 | migrate_hrtimer_list(&old_base->clock_base[i], | |
1694 | &new_base->clock_base[i]); | |
1695 | } | |
1696 | ||
1697 | raw_spin_unlock(&old_base->lock); | |
1698 | raw_spin_unlock(&new_base->lock); | |
1699 | ||
1700 | /* Check whether there are expired timers to handle */ | |
1701 | __hrtimer_peek_ahead_timers(); | |
1702 | local_irq_enable(); | |
1703 | } | |
1704 | ||
1705 | #endif /* CONFIG_HOTPLUG_CPU */ | |
1706 | ||
1707 | static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, | |
1708 | unsigned long action, void *hcpu) | |
1709 | { | |
1710 | int scpu = (long)hcpu; | |
1711 | ||
1712 | switch (action) { | |
1713 | ||
1714 | case CPU_UP_PREPARE: | |
1715 | case CPU_UP_PREPARE_FROZEN: | |
1716 | init_hrtimers_cpu(scpu); | |
1717 | break; | |
1718 | ||
1719 | #ifdef CONFIG_HOTPLUG_CPU | |
1720 | case CPU_DYING: | |
1721 | case CPU_DYING_FROZEN: | |
1722 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu); | |
1723 | break; | |
1724 | case CPU_DEAD: | |
1725 | case CPU_DEAD_FROZEN: | |
1726 | { | |
1727 | clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu); | |
1728 | migrate_hrtimers(scpu); | |
1729 | break; | |
1730 | } | |
1731 | #endif | |
1732 | ||
1733 | default: | |
1734 | break; | |
1735 | } | |
1736 | ||
1737 | return NOTIFY_OK; | |
1738 | } | |
1739 | ||
1740 | static struct notifier_block __cpuinitdata hrtimers_nb = { | |
1741 | .notifier_call = hrtimer_cpu_notify, | |
1742 | }; | |
1743 | ||
1744 | void __init hrtimers_init(void) | |
1745 | { | |
1746 | hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, | |
1747 | (void *)(long)smp_processor_id()); | |
1748 | register_cpu_notifier(&hrtimers_nb); | |
1749 | #ifdef CONFIG_HIGH_RES_TIMERS | |
1750 | open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); | |
1751 | #endif | |
1752 | } | |
1753 | ||
1754 | /** | |
1755 | * schedule_hrtimeout_range_clock - sleep until timeout | |
1756 | * @expires: timeout value (ktime_t) | |
1757 | * @delta: slack in expires timeout (ktime_t) | |
1758 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | |
1759 | * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME | |
1760 | */ | |
1761 | int __sched | |
1762 | schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta, | |
1763 | const enum hrtimer_mode mode, int clock) | |
1764 | { | |
1765 | struct hrtimer_sleeper t; | |
1766 | ||
1767 | /* | |
1768 | * Optimize when a zero timeout value is given. It does not | |
1769 | * matter whether this is an absolute or a relative time. | |
1770 | */ | |
1771 | if (expires && !expires->tv64) { | |
1772 | __set_current_state(TASK_RUNNING); | |
1773 | return 0; | |
1774 | } | |
1775 | ||
1776 | /* | |
1777 | * A NULL parameter means "infinite" | |
1778 | */ | |
1779 | if (!expires) { | |
1780 | schedule(); | |
1781 | __set_current_state(TASK_RUNNING); | |
1782 | return -EINTR; | |
1783 | } | |
1784 | ||
1785 | hrtimer_init_on_stack(&t.timer, clock, mode); | |
1786 | hrtimer_set_expires_range_ns(&t.timer, *expires, delta); | |
1787 | ||
1788 | hrtimer_init_sleeper(&t, current); | |
1789 | ||
1790 | hrtimer_start_expires(&t.timer, mode); | |
1791 | if (!hrtimer_active(&t.timer)) | |
1792 | t.task = NULL; | |
1793 | ||
1794 | if (likely(t.task)) | |
1795 | schedule(); | |
1796 | ||
1797 | hrtimer_cancel(&t.timer); | |
1798 | destroy_hrtimer_on_stack(&t.timer); | |
1799 | ||
1800 | __set_current_state(TASK_RUNNING); | |
1801 | ||
1802 | return !t.task ? 0 : -EINTR; | |
1803 | } | |
1804 | ||
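/*
 * Usage sketch (illustrative only; example_wait_until() is a made-up
 * name): wait until an absolute CLOCK_REALTIME deadline, allowing 1ms
 * of slack for wakeup coalescing.
 *
 *	static int example_wait_until(ktime_t wall_deadline)
 *	{
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		return schedule_hrtimeout_range_clock(&wall_deadline,
 *						      NSEC_PER_MSEC,
 *						      HRTIMER_MODE_ABS,
 *						      CLOCK_REALTIME);
 *	}
 */
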
1805 | /** | |
1806 | * schedule_hrtimeout_range - sleep until timeout | |
1807 | * @expires: timeout value (ktime_t) | |
1808 | * @delta: slack in expires timeout (ktime_t) | |
1809 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | |
1810 | * | |
1811 | * Make the current task sleep until the given expiry time has | |
1812 | * elapsed. The routine will return immediately unless | |
1813 | * the current task state has been set (see set_current_state()). | |
1814 | * | |
1815 | * The @delta argument gives the kernel the freedom to schedule the | |
1816 | * actual wakeup to a time that is both power and performance friendly. | |
1817 | * The kernel gives the normal best effort behavior for "@expires+@delta", | |
1818 | * but may decide to fire the timer earlier than that, though never before @expires. | |
1819 | * | |
1820 | * You can set the task state as follows - | |
1821 | * | |
1822 | * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to | |
1823 | * pass before the routine returns. | |
1824 | * | |
1825 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1826 | * delivered to the current task. | |
1827 | * | |
1828 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1829 | * routine returns. | |
1830 | * | |
1831 | * Returns 0 when the timer has expired, otherwise -EINTR | |
1832 | */ | |
1833 | int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, | |
1834 | const enum hrtimer_mode mode) | |
1835 | { | |
1836 | return schedule_hrtimeout_range_clock(expires, delta, mode, | |
1837 | CLOCK_MONOTONIC); | |
1838 | } | |
1839 | EXPORT_SYMBOL_GPL(schedule_hrtimeout_range); | |
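
/*
 * Usage sketch (illustrative only; example_wait_for_event() is a made-up
 * name): sleep for roughly 500us, giving the kernel 100us of slack to
 * coalesce the wakeup with other timers. Returns 0 on expiry, -EINTR if
 * a signal woke us up early.
 *
 *	static int example_wait_for_event(void)
 *	{
 *		ktime_t timeout = ktime_set(0, 500 * NSEC_PER_USEC);
 *
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		return schedule_hrtimeout_range(&timeout, 100 * NSEC_PER_USEC,
 *						HRTIMER_MODE_REL);
 *	}
 */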
1840 | ||
1841 | /** | |
1842 | * schedule_hrtimeout - sleep until timeout | |
1843 | * @expires: timeout value (ktime_t) | |
1844 | * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL | |
1845 | * | |
1846 | * Make the current task sleep until the given expiry time has | |
1847 | * elapsed. The routine will return immediately unless | |
1848 | * the current task state has been set (see set_current_state()). | |
1849 | * | |
1850 | * You can set the task state as follows - | |
1851 | * | |
1852 | * %TASK_UNINTERRUPTIBLE - at least the @expires time is guaranteed to | |
1853 | * pass before the routine returns. | |
1854 | * | |
1855 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | |
1856 | * delivered to the current task. | |
1857 | * | |
1858 | * The current task state is guaranteed to be TASK_RUNNING when this | |
1859 | * routine returns. | |
1860 | * | |
1861 | * Returns 0 when the timer has expired, otherwise -EINTR | |
1862 | */ | |
1863 | int __sched schedule_hrtimeout(ktime_t *expires, | |
1864 | const enum hrtimer_mode mode) | |
1865 | { | |
1866 | return schedule_hrtimeout_range(expires, 0, mode); | |
1867 | } | |
1868 | EXPORT_SYMBOL_GPL(schedule_hrtimeout); |
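
/*
 * Usage sketch (illustrative only): an uninterruptible one-second sleep
 * with no slack; equivalent to schedule_hrtimeout_range(&t, 0, mode).
 *
 *	ktime_t t = ktime_set(1, 0);
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	schedule_hrtimeout(&t, HRTIMER_MODE_REL);
 */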