/*
 *	linux/kernel/timer.c
 *
 *	Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *	Copyright (C) 1991, 1992  Linus Torvalds
 *
 *	1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *	1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *		"A Kernel Model for Precision Timekeeping" by Dave Mills
 *	1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *		serialize accesses to xtime/lost_ticks).
 *			Copyright (C) 1998  Andrea Arcangeli
 *	1999-03-10  Improved NTP compatibility by Ulrich Windl
 *	2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *	2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *			Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *		Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */

#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct timer_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
};

typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	struct timer_base_s t_base;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
static tvec_base_t boot_tvec_bases;

static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->t_base.running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

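/*
 * Worked example (illustrative sketch, not part of the build): with the
 * default CONFIG_BASE_SMALL=0 sizes above (TVR_BITS=8, TVN_BITS=6), tv1
 * covers deltas below 2^8 jiffies, tv2 below 2^14, tv3 below 2^20, tv4
 * below 2^26, and tv5 holds everything else. The helper below mirrors
 * the level selection in internal_add_timer() for a given delta; the
 * function name is hypothetical.
 */
#if 0
static int timer_wheel_level(unsigned long idx)
{
	if (idx < TVR_SIZE)				/* < 2^8 jiffies */
		return 1;
	if (idx < 1UL << (TVR_BITS + TVN_BITS))		/* < 2^14 */
		return 2;
	if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS))	/* < 2^20 */
		return 3;
	if (idx < 1UL << (TVR_BITS + 3 * TVN_BITS))	/* < 2^26 */
		return 4;
	return 5;					/* tv5 catches the rest */
}
#endif
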
typedef struct timer_base_s timer_base_t;
/*
 * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
 * at compile time, and we need timer->base to lock the timer.
 */
timer_base_t __init_timer_base
	____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
EXPORT_SYMBOL(__init_timer_base);

/***
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = &per_cpu(tvec_bases, raw_smp_processor_id())->t_base;
}
EXPORT_SYMBOL(init_timer);

static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static timer_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
{
	timer_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	timer_base_t *base;
	tvec_base_t *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != &new_base->t_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (unlikely(base->running_timer == timer)) {
			/* The timer remains on a former base */
			new_base = container_of(base, tvec_base_t, t_base);
		} else {
			/* See the comment in lock_timer_base() */
			timer->base = NULL;
			spin_unlock(&base->lock);
			spin_lock(&new_base->t_base.lock);
			timer->base = &new_base->t_base;
		}
	}

	timer->expires = expires;
	internal_add_timer(new_base, timer);
	spin_unlock_irqrestore(&new_base->t_base.lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);

/***
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->t_base.lock, flags);
	timer->base = &base->t_base;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->t_base.lock, flags);
}


/***
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 *
 * mod_timer is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 * del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);

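/*
 * Usage sketch (illustrative, not part of the original file): arming a
 * one-shot timer with the interfaces defined above. The handler and its
 * data value are hypothetical; a real caller would embed the timer in
 * its own object and stop it before that object is freed.
 */
#if 0
static void example_timeout(unsigned long data)
{
	printk(KERN_DEBUG "timer for object %p fired\n", (void *)data);
}

static void example_arm(struct timer_list *timer, void *obj)
{
	init_timer(timer);		/* must precede any other timer call */
	timer->function = example_timeout;
	timer->data = (unsigned long)obj;
	mod_timer(timer, jiffies + HZ);	/* fire in about one second */
}
#endif
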
/***
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	timer_base_t *base;
	unsigned long flags;
	int ret = 0;

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	timer_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/***
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
	}
}

EXPORT_SYMBOL(del_timer_sync);
#endif

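/*
 * Teardown sketch (illustrative, not part of the original file): per the
 * synchronization rules above, the caller must stop the timer from being
 * re-armed before waiting for it. The structure and flag are hypothetical.
 */
#if 0
struct example_obj {
	struct timer_list timer;
	int shutting_down;
};

static void example_shutdown(struct example_obj *obj)
{
	obj->shutting_down = 1;		/* handler must check this before re-arming */
	del_timer_sync(&obj->timer);	/* on return, handler is not running anywhere */
	kfree(obj);
}
#endif
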
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct list_head *head, *curr;

	head = tv->vec + index;
	curr = head->next;
	/*
	 * We are removing _all_ timers from the list, so we don't have to
	 * detach them individually, just clear the list afterwards.
	 */
	while (curr != head) {
		struct timer_list *tmp;

		tmp = list_entry(curr, struct timer_list, entry);
		BUG_ON(tmp->base != &base->t_base);
		curr = curr->next;
		internal_add_timer(base, tmp);
	}
	INIT_LIST_HEAD(head);

	return index;
}

/***
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK

static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->t_base.lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list = LIST_HEAD_INIT(work_list);
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_splice_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next,struct timer_list,entry);
			fn = timer->function;
			data = timer->data;

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->t_base.lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->t_base.lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->t_base.lock);
}

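/*
 * Illustrative sketch (not part of the original file): the preempt_count
 * comparison in __run_timers() above catches handlers that return with an
 * unbalanced preemption count on an SMP or preemptible kernel, e.g. one
 * that forgets to drop a spinlock. The lock and handler are hypothetical.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static void example_buggy_handler(unsigned long data)
{
	spin_lock(&example_lock);
	/* Returning without spin_unlock() leaves preempt_count raised;
	 * __run_timers() prints the "huh, entered ..." warning and BUGs. */
}
#endif
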
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	unsigned long hr_expires = MAX_JIFFY_OFFSET;
	ktime_t hr_delta;
	tvec_t *varray[4];
	int i, j;

	hr_delta = hrtimer_get_next_event();
	if (hr_delta.tv64 != KTIME_MAX) {
		struct timespec tsdelta;
		tsdelta = ktime_to_timespec(hr_delta);
		hr_expires = timespec_to_jiffies(&tsdelta);
		if (hr_expires < 3)
			return hr_expires + jiffies;
	}
	hr_expires += jiffies;

	base = __get_cpu_var(tvec_bases);
	spin_lock(&base->t_base.lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from next tv element that would cascade into tv element
		 * where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->t_base.lock);

	if (time_before(hr_expires, expires))
		return hr_expires;

	return expires;
}
#endif

/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */


/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
static long time_phase;			/* phase offset (scaled us)	*/
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
					/* frequency offset (scaled ppm)*/
static long time_adj;			/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/
long time_adjust;
long time_next_adjust;

/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror += time_tolerance >> SHIFT_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second. The microtime()
	 * routine or external clock driver will insure that reported time is
	 * always monotonic. The ugly divides should be replaced.
	 */
	switch (time_state) {
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;
	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			wall_to_monotonic.tv_sec++;
			/*
			 * The timer interpolator will make time change
			 * gradually instead of an immediate jump by one second
			 */
			time_interpolator_update(-NSEC_PER_SEC);
			time_state = TIME_OOP;
			clock_was_set();
			printk(KERN_NOTICE "Clock: inserting leap second "
					"23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			wall_to_monotonic.tv_sec--;
			/*
			 * Use of time interpolator for a gradual change of
			 * time
			 */
			time_interpolator_update(NSEC_PER_SEC);
			time_state = TIME_WAIT;
			clock_was_set();
			printk(KERN_NOTICE "Clock: deleting leap second "
					"23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second. In PLL mode, the
	 * offset is reduced by a fixed factor times the time constant. In FLL
	 * mode the offset is used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread the adjustment
	 * over not more than the number of seconds between updates.
	 */
	ltemp = time_offset;
	if (!(time_status & STA_FLL))
		ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
	ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
	ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
	time_offset -= ltemp;
	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

	/*
	 * Compute the frequency estimate and additional phase adjustment due
	 * to frequency error for the next second.
	 */
	ltemp = time_freq;
	time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
	/*
	 * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
	 * get 128.125; => only 0.125% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
	/*
	 * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
	/*
	 * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}

/*
 * Returns how many microseconds we need to add to xtime this tick
 * in doing an adjustment requested with adjtime.
 */
static long adjtime_adjustment(void)
{
	long time_adjust_step;

	time_adjust_step = time_adjust;
	if (time_adjust_step) {
		/*
		 * We are doing an adjtime thing. Prepare time_adjust_step to
		 * be within bounds. Note that a positive time_adjust means we
		 * want the clock to run faster.
		 *
		 * Limit the amount of the step to be in the range
		 * -tickadj .. +tickadj
		 */
		time_adjust_step = min(time_adjust_step, (long)tickadj);
		time_adjust_step = max(time_adjust_step, (long)-tickadj);
	}
	return time_adjust_step;
}

/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
	long time_adjust_step, delta_nsec;

	time_adjust_step = adjtime_adjustment();
	if (time_adjust_step)
		/* Reduce by this step the amount of time left  */
		time_adjust -= time_adjust_step;
	delta_nsec = tick_nsec + time_adjust_step * 1000;
	/*
	 * Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
		long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
		time_phase -= ltemp << (SHIFT_SCALE - 10);
		delta_nsec += ltemp;
	}
	xtime.tv_nsec += delta_nsec;
	time_interpolator_update(delta_nsec);

	/* Changes by adjtime() do not take effect till next tick. */
	if (time_next_adjust != 0) {
		time_adjust = time_next_adjust;
		time_next_adjust = 0;
	}
}

/*
 * Return how long ticks are at the moment, that is, how much time
 * update_wall_time_one_tick will add to xtime next time we call it
 * (assuming no calls to do_adjtimex in the meantime).
 * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
 * bits to the right of the binary point.
 * This function has no side-effects.
 */
u64 current_tick_length(void)
{
	long delta_nsec;

	delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
	return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
}

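/*
 * Fixed-point sketch (illustrative, not part of the original file):
 * converting the value returned by current_tick_length() back to whole
 * nanoseconds just drops the SHIFT_SCALE-10 fractional bits described in
 * the comment above. The helper name is hypothetical.
 */
#if 0
static unsigned long current_tick_length_ns(void)
{
	return (unsigned long)(current_tick_length() >> (SHIFT_SCALE - 10));
}
#endif
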
/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks)
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
		if (xtime.tv_nsec >= 1000000000) {
			xtime.tv_nsec -= 1000000000;
			xtime.tv_sec++;
			second_overflow();
		}
	} while (ticks);
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}

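/*
 * Decoding sketch (illustrative, not part of the original file): avenrun[]
 * holds fixed-point values scaled by FIXED_1 (1 << FSHIFT), so a load of
 * exactly 1.00 is stored as FIXED_1. Splitting an entry into the integer
 * and two-digit fractional parts that tools usually display:
 */
#if 0
static void example_print_load(void)
{
	unsigned long a = avenrun[0];	/* should be sampled under xtime_lock */

	printk(KERN_DEBUG "load1: %lu.%02lu\n",
	       a >> FSHIFT,				/* integer part */
	       ((a & (FIXED_1 - 1)) * 100) >> FSHIFT);	/* fractional part */
}
#endif
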
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();
	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
	unsigned long ticks;

	ticks = jiffies - wall_jiffies;
	if (ticks) {
		wall_jiffies += ticks;
		update_wall_time(ticks);
	}
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
	jiffies_64++;
	/* prevent loading jiffies before storing new jiffies_64 value. */
	barrier();
	update_times();
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return current->tgid;
}

/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferenceable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
	int pid;
	struct task_struct *me = current;
	struct task_struct *parent;

	parent = me->group_leader->real_parent;
	for (;;) {
		pid = parent->tgid;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
{
		struct task_struct *old = parent;

		/*
		 * Make sure we read the pid before re-reading the
		 * parent pointer:
		 */
		smp_rmb();
		parent = me->group_leader->real_parent;
		if (old != parent)
			continue;
}
#endif
		break;
	}
	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((task_t *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);

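/*
 * Usage sketch (illustrative, not part of the original file): the task
 * state must be set before calling schedule_timeout(), as the kernel-doc
 * above requires; without it the call returns immediately. The function
 * name is hypothetical.
 */
#if 0
static signed long example_wait_half_second(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(HZ / 2);	/* 0, or jiffies left if signalled */
}
#endif
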
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}

/*
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset((char *)&val, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying. The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		val.procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(&val);
	si_swapinfo(&val);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = val.totalram + val.totalswap;
	if (mem_total < val.totalram || mem_total < val.totalswap)
		goto out;
	bitcount = 0;
	mem_unit = val.mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * val.mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	val.mem_unit = 1;
	val.totalram <<= bitcount;
	val.freeram <<= bitcount;
	val.sharedram <<= bitcount;
	val.bufferram <<= bitcount;
	val.totalswap <<= bitcount;
	val.freeswap <<= bitcount;
	val.totalhigh <<= bitcount;
	val.freehigh <<= bitcount;

 out:
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

static int __devinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;

	base = per_cpu(tvec_bases, cpu);
	if (!base) {
		static char boot_done;

		/*
		 * Cannot do allocation in init_timers as that runs before the
		 * allocator initializes (and would waste memory if there are
		 * more possible CPUs than will ever be installed/brought up).
		 */
		if (boot_done) {
			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;
			memset(base, 0, sizeof(*base));
		} else {
			base = &boot_tvec_bases;
			boot_done = 1;
		}
		per_cpu(tvec_bases, cpu) = base;
	}
	spin_lock_init(&base->t_base.lock);
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		detach_timer(timer, 0);
		timer->base = &new_base->t_base;
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	local_irq_disable();
	spin_lock(&new_base->t_base.lock);
	spin_lock(&old_base->t_base.lock);

	if (old_base->t_base.running_timer)
		BUG();
	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->t_base.lock);
	spin_unlock(&new_base->t_base.lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch(action) {
	case CPU_UP_PREPARE:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}

#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src)
	{
		case TIME_SOURCE_FUNCTION:
			x = time_interpolator->addr;
			return x();

		case TIME_SOURCE_MMIO64	:
			return readq_relaxed((void __iomem *)time_interpolator->addr);

		case TIME_SOURCE_MMIO32	:
			return readl_relaxed((void __iomem *)time_interpolator->addr);

		default: return get_cycles();
	}
}

static inline u64 time_interpolator_get_counter(int writelock)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter)
	{
		u64 lcycle;
		u64 now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;

			/* When holding the xtime write lock, there's no need
			 * to add the overhead of the cmpxchg.  Readers are
			 * forced to retry until the write lock is released.
			 */
			if (writelock) {
				time_interpolator->last_cycle = now;
				return now;
			}
			/* Keep track of the last timer value returned. The use of cmpxchg here
			 * will cause contention in an SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	}
	else
		return time_interpolator_get_cycles(src);
}

void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)

unsigned long time_interpolator_get_offset(void)
{
	/* If we do not have a time interpolator set up then just return zero */
	if (!time_interpolator)
		return 0;

	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}

#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST

static void time_interpolator_update(long delta_nsec)
{
	u64 counter;
	unsigned long offset;

	/* If there is no time interpolator set up then do nothing */
	if (!time_interpolator)
		return;

	/*
	 * The interpolator compensates for late ticks by accumulating the late
	 * time in time_interpolator->offset. A tick earlier than expected will
	 * lead to a reset of the offset and a corresponding jump of the clock
	 * forward. Again this only works if the interpolator clock is running
	 * slightly slower than the regular clock and the tuning logic ensures
	 * that.
	 */

	counter = time_interpolator_get_counter(1);
	offset = time_interpolator->offset +
			GET_TI_NSECS(counter, time_interpolator);

	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
		time_interpolator->offset = offset - delta_nsec;
	else {
		time_interpolator->skips++;
		time_interpolator->ns_skipped += delta_nsec - offset;
		time_interpolator->offset = 0;
	}
	time_interpolator->last_counter = counter;

	/* Tuning logic for time interpolator invoked every minute or so.
	 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
	 * Increase interpolator clock speed if we skip too much time.
	 */
	if (jiffies % INTERPOLATOR_ADJUST == 0)
	{
		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
			time_interpolator->nsec_per_cyc--;
		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
			time_interpolator->nsec_per_cyc++;
		time_interpolator->skips = 0;
		time_interpolator->ns_skipped = 0;
	}
}

static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2*time_interpolator->frequency ||
	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}

void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	/* Sanity check */
	if (ti->frequency == 0 || ti->mask == 0)
		BUG();

	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}

void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
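
/*
 * Usage sketch (illustrative, not part of the original file): msleep()
 * always sleeps the full interval, while msleep_interruptible() returns
 * early on a signal, reporting the milliseconds that were left. The
 * caller below is hypothetical.
 */
#if 0
static int example_poll_hw(void)
{
	msleep(10);				/* settle time, uninterruptible */
	if (msleep_interruptible(1000))		/* abort early on a signal */
		return -EINTR;
	return 0;
}
#endif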