cpu-timers: Return correct previous timer reload value

diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 438ff4523513b945962e50a98eb8d4e2fbb28f5d..cce2f0b2d4067bac5132fc69baccde9a20200bb5 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
 #include <trace/events/timer.h>
 
 /*
- * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ * Called after updating RLIMIT_CPU to run the process CPU timer and to
+ * update the tsk->signal->cputime_expires expiration cache if
+ * necessary.  Needs siglock protection since other code may update the
+ * expiration cache as well.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
        cputime_t cputime = secs_to_cputime(rlim_new);
-       struct signal_struct *const sig = current->signal;
 
-       if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
-           cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
-               spin_lock_irq(&current->sighand->siglock);
-               set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-               spin_unlock_irq(&current->sighand->siglock);
-       }
+       spin_lock_irq(&current->sighand->siglock);
+       set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+       spin_unlock_irq(&current->sighand->siglock);
 }
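
This function is reached from the setrlimit() path. A minimal sketch of the
caller, assuming the usual kernel/sys.c structure of this era (the exact
guards may differ in the real tree):

        /* kernel/sys.c, sys_setrlimit() -- hypothetical sketch, after the
         * new limit has been installed under task_lock() */
        if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY)
                update_rlimit_cpu(new_rlim.rlim_cur);

The old code skipped set_process_cpu_timer() whenever an earlier ITIMER_PROF
expiry was already pending. Since the expiration cache is now shared between
itimers, RLIMIT_CPU and POSIX 1.b timers, every limit update has to go
through set_process_cpu_timer() under siglock so the cache stays consistent.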
 
 static int check_clock(const clockid_t which_clock)
@@ -548,107 +547,63 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
               cputime_gt(expires, new_exp);
 }
 
-static inline int expires_le(cputime_t expires, cputime_t new_exp)
-{
-       return !cputime_eq(expires, cputime_zero) &&
-              cputime_le(expires, new_exp);
-}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
  * for reading, and interrupts disabled.
  */
-static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
+static void arm_timer(struct k_itimer *timer)
 {
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
+       struct task_cputime *cputime_expires;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
-       unsigned long i;
 
-       head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
-               p->cpu_timers : p->signal->cpu_timers);
+       if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+               head = p->cpu_timers;
+               cputime_expires = &p->cputime_expires;
+       } else {
+               head = p->signal->cpu_timers;
+               cputime_expires = &p->signal->cputime_expires;
+       }
        head += CPUCLOCK_WHICH(timer->it_clock);
 
        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);
 
        listpos = head;
-       if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
-               list_for_each_entry(next, head, entry) {
-                       if (next->expires.sched > nt->expires.sched)
-                               break;
-                       listpos = &next->entry;
-               }
-       } else {
-               list_for_each_entry(next, head, entry) {
-                       if (cputime_gt(next->expires.cpu, nt->expires.cpu))
-                               break;
-                       listpos = &next->entry;
-               }
+       list_for_each_entry(next, head, entry) {
+               if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
+                       break;
+               listpos = &next->entry;
        }
        list_add(&nt->entry, listpos);
 
        if (listpos == head) {
+               union cpu_time_count *exp = &nt->expires;
+
                /*
-                * We are the new earliest-expiring timer.
-                * If we are a thread timer, there can always
-                * be a process timer telling us to stop earlier.
+                * We are the new earliest-expiring POSIX 1.b timer, hence
+                * we need to update the expiration cache.  Note that for
+                * process timers this cache is shared with itimers and
+                * RLIMIT_CPU, and for thread timers with RLIMIT_RTTIME.
                 */
 
-               if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
-                       union cpu_time_count *exp = &nt->expires;
-
-                       switch (CPUCLOCK_WHICH(timer->it_clock)) {
-                       default:
-                               BUG();
-                       case CPUCLOCK_PROF:
-                               if (expires_gt(p->cputime_expires.prof_exp,
-                                              exp->cpu))
-                                       p->cputime_expires.prof_exp = exp->cpu;
-                               break;
-                       case CPUCLOCK_VIRT:
-                               if (expires_gt(p->cputime_expires.virt_exp,
-                                              exp->cpu))
-                                       p->cputime_expires.virt_exp = exp->cpu;
-                               break;
-                       case CPUCLOCK_SCHED:
-                               if (p->cputime_expires.sched_exp == 0 ||
-                                   p->cputime_expires.sched_exp > exp->sched)
-                                       p->cputime_expires.sched_exp =
-                                                               exp->sched;
-                               break;
-                       }
-               } else {
-                       struct signal_struct *const sig = p->signal;
-                       union cpu_time_count *exp = &timer->it.cpu.expires;
-
-                       /*
-                        * For a process timer, set the cached expiration time.
-                        */
-                       switch (CPUCLOCK_WHICH(timer->it_clock)) {
-                       default:
-                               BUG();
-                       case CPUCLOCK_VIRT:
-                               if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
-                                              exp->cpu))
-                                       break;
-                               sig->cputime_expires.virt_exp = exp->cpu;
-                               break;
-                       case CPUCLOCK_PROF:
-                               if (expires_le(sig->it[CPUCLOCK_PROF].expires,
-                                              exp->cpu))
-                                       break;
-                               i = sig->rlim[RLIMIT_CPU].rlim_cur;
-                               if (i != RLIM_INFINITY &&
-                                   i <= cputime_to_secs(exp->cpu))
-                                       break;
-                               sig->cputime_expires.prof_exp = exp->cpu;
-                               break;
-                       case CPUCLOCK_SCHED:
-                               sig->cputime_expires.sched_exp = exp->sched;
-                               break;
-                       }
+               switch (CPUCLOCK_WHICH(timer->it_clock)) {
+               case CPUCLOCK_PROF:
+                       if (expires_gt(cputime_expires->prof_exp, exp->cpu))
+                               cputime_expires->prof_exp = exp->cpu;
+                       break;
+               case CPUCLOCK_VIRT:
+                       if (expires_gt(cputime_expires->virt_exp, exp->cpu))
+                               cputime_expires->virt_exp = exp->cpu;
+                       break;
+               case CPUCLOCK_SCHED:
+                       if (cputime_expires->sched_exp == 0 ||
+                           cputime_expires->sched_exp > exp->sched)
+                               cputime_expires->sched_exp = exp->sched;
+                       break;
                }
        }
 
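The unified insertion loop relies on the existing cpu_time_before() helper to
hide the SCHED-versus-cputime distinction that the two removed loops spelled
out by hand. For reference, the helper in this file looks roughly like this:

        static inline int cpu_time_before(const clockid_t which_clock,
                                          union cpu_time_count now,
                                          union cpu_time_count then)
        {
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                        return now.sched < then.sched;
                else
                        return cputime_lt(now.cpu, then.cpu);
        }
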
@@ -721,7 +676,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
 {
        struct task_struct *p = timer->it.cpu.task;
-       union cpu_time_count old_expires, new_expires, val;
+       union cpu_time_count old_expires, new_expires, old_incr, val;
        int ret;
 
        if (unlikely(p == NULL)) {
@@ -752,6 +707,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
        BUG_ON(!irqs_disabled());
 
        ret = 0;
+       old_incr = timer->it.cpu.incr;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
@@ -830,7 +786,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
-               arm_timer(timer, val);
+               arm_timer(timer);
        }
 
        read_unlock(&tasklist_lock);
@@ -867,7 +823,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
  out:
        if (old) {
                sample_to_timespec(timer->it_clock,
-                                  timer->it.cpu.incr, &old->it_interval);
+                                  old_incr, &old->it_interval);
        }
        return ret;
 }
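
The old_incr snapshot is the fix named in the commit subject: the reload
value has to be captured before the timer is rewritten, otherwise
old->it_interval at the out: label reports the interval that was just
installed rather than the previous one. A small userspace program that
exercises this path (standard POSIX timer API; link with -lrt on older
glibc):

        #include <signal.h>
        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                timer_t tid;
                struct sigevent sev = {
                        .sigev_notify = SIGEV_SIGNAL,
                        .sigev_signo  = SIGPROF,
                };
                /* struct itimerspec is { it_interval, it_value } */
                struct itimerspec first  = { { 1, 0 }, { 5, 0 } }; /* 1s reload */
                struct itimerspec second = { { 2, 0 }, { 5, 0 } }; /* 2s reload */
                struct itimerspec old;

                if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid))
                        return 1;
                timer_settime(tid, 0, &first, NULL);
                timer_settime(tid, 0, &second, &old);

                /* Without the fix this prints 2 (the value just set); with
                 * old_incr saved first it prints the previous reload, 1. */
                printf("previous interval: %ld s\n",
                       (long)old.it_interval.tv_sec);
                return 0;
        }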
@@ -982,6 +938,7 @@ static void check_thread_timers(struct task_struct *tsk,
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
+       unsigned long soft;
 
        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
@@ -1030,9 +987,10 @@ static void check_thread_timers(struct task_struct *tsk,
        /*
         * Check for the special case thread timers.
         */
-       if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
-               unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
-               unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+       soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+       if (soft != RLIM_INFINITY) {
+               unsigned long hard =
+                       ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -1043,14 +1001,13 @@ static void check_thread_timers(struct task_struct *tsk,
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
-               if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+               if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
-                       if (sig->rlim[RLIMIT_RTTIME].rlim_cur
-                           < sig->rlim[RLIMIT_RTTIME].rlim_max) {
-                               sig->rlim[RLIMIT_RTTIME].rlim_cur +=
-                                                               USEC_PER_SEC;
+                       if (soft < hard) {
+                               soft += USEC_PER_SEC;
+                               sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
@@ -1121,6 +1078,7 @@ static void check_process_timers(struct task_struct *tsk,
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
+       unsigned long soft;
 
        /*
         * Don't sample the current process CPU clocks if there are no timers.
@@ -1193,11 +1151,13 @@ static void check_process_timers(struct task_struct *tsk,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
-
-       if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+       soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+       if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
+               unsigned long hard =
+                       ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
-               if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
+               if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
@@ -1205,17 +1165,17 @@ static void check_process_timers(struct task_struct *tsk,
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
-               if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
+               if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
-                       if (sig->rlim[RLIMIT_CPU].rlim_cur
-                           < sig->rlim[RLIMIT_CPU].rlim_max) {
-                               sig->rlim[RLIMIT_CPU].rlim_cur++;
+                       if (soft < hard) {
+                               soft++;
+                               sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
-               x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+               x = secs_to_cputime(soft);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
@@ -1290,7 +1250,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
        /*
         * Now re-arm for the new expiry time.
         */
-       arm_timer(timer, now);
+       arm_timer(timer);
 
 out_unlock:
        read_unlock(&tasklist_lock);
@@ -1382,7 +1342,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
                        return 1;
        }
 
-       return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
+       return 0;
 }
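
Returning 0 is correct here because RLIMIT_CPU no longer needs a fastpath
test of its own: update_rlimit_cpu() and check_process_timers() now fold the
limit into sig->cputime_expires, which the earlier part of this function
already checks via task_cputime_expired(). As a rough sketch of that helper
(paraphrased from memory; the real inline may differ in detail):

        static inline int task_cputime_expired(const struct task_cputime *sample,
                                               const struct task_cputime *expires)
        {
                if (!cputime_eq(expires->utime, cputime_zero) &&
                    cputime_ge(sample->utime, expires->utime))
                        return 1;
                if (!cputime_eq(expires->stime, cputime_zero) &&
                    cputime_ge(cputime_add(sample->utime, sample->stime),
                               expires->stime))
                        return 1;
                if (expires->sum_exec_runtime != 0 &&
                    sample->sum_exec_runtime >= expires->sum_exec_runtime)
                        return 1;
                return 0;
        }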
 
 /*
@@ -1448,21 +1408,23 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Set one of the process-wide special case CPU timers.
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
- * The *newval argument is relative and we update it to be absolute, *oldval
- * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
 {
        union cpu_time_count now;
-       struct list_head *head;
 
        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);
 
        if (oldval) {
+               /*
+                * We are setting an itimer.  The *oldval is absolute and
+                * we update it to be relative; the *newval argument is
+                * relative and we update it to be absolute.
+                */
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
@@ -1475,33 +1437,21 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);
-
-               /*
-                * If the RLIMIT_CPU timer will expire before the
-                * ITIMER_PROF timer, we have nothing else to do.
-                */
-               if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
-                   < cputime_to_secs(*newval))
-                       return;
        }
 
        /*
-        * Check whether there are any process timers already set to fire
-        * before this one.  If so, we don't have anything more to do.
+        * Update the expiration cache if we are the earliest timer; this
+        * also covers RLIMIT_CPU expiring earlier than the prof_exp timer.
         */
-       head = &tsk->signal->cpu_timers[clock_idx];
-       if (list_empty(head) ||
-           cputime_ge(list_first_entry(head,
-                                 struct cpu_timer_list, entry)->expires.cpu,
-                      *newval)) {
-               switch (clock_idx) {
-               case CPUCLOCK_PROF:
+       switch (clock_idx) {
+       case CPUCLOCK_PROF:
+               if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
-                       break;
-               case CPUCLOCK_VIRT:
+               break;
+       case CPUCLOCK_VIRT:
+               if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
-                       break;
-               }
+               break;
        }
 }
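
A userspace sketch of the itimer round trip described in the new comment:
the caller passes a relative value, the kernel stores an absolute expiry,
and the previous absolute expiry comes back as the relative time remaining:

        #include <stdio.h>
        #include <sys/time.h>

        int main(void)
        {
                /* struct itimerval is { it_interval, it_value } */
                struct itimerval new = { { 0, 0 }, { 10, 0 } }; /* one-shot, 10s */
                struct itimerval old;

                setitimer(ITIMER_PROF, &new, NULL);
                /* Burn some CPU time, then rewrite the timer and read back
                 * how much of the original 10 seconds was left. */
                for (volatile long i = 0; i < 200000000L; i++)
                        ;
                setitimer(ITIMER_PROF, &new, &old);
                printf("time left on previous timer: %ld.%06ld s\n",
                       (long)old.it_value.tv_sec, (long)old.it_value.tv_usec);
                return 0;
        }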