/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP
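/*
 * "RT overload" tracking: rt_overload_mask holds the CPUs whose runqueues
 * have more than one runnable RT task, and rto_count is a fast-path counter
 * so rt_overloaded() can be checked without touching the mask.
 */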
static cpumask_t rt_overload_mask;
static atomic_t rto_count;
static inline int rt_overloaded(void)
{
	return atomic_read(&rto_count);
}
static inline cpumask_t *rt_overload(void)
{
	return &rt_overload_mask;
}
static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rt_overload_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rto_count);
}
static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rto_count);
	cpu_clear(rq->cpu, rt_overload_mask);
}

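/*
 * An rq is flagged as overloaded when it has more than one runnable RT
 * task and at least one of them may migrate to another CPU; keep the
 * global overload state in sync with that condition.
 */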
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
		rt_set_overload(rq);
	else
		rt_clear_overload(rq);
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);
}

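/*
 * Accounting helpers called on every RT enqueue/dequeue: they maintain
 * rt_nr_running, the cached highest queued RT priority and, on SMP, the
 * count of tasks that are allowed to migrate.
 */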
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	rq->rt.rt_nr_running++;
#ifdef CONFIG_SMP
	if (p->prio < rq->rt.highest_prio)
		rq->rt.highest_prio = p->prio;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory++;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
	WARN_ON(!rt_task(p));
	WARN_ON(!rq->rt.rt_nr_running);
	rq->rt.rt_nr_running--;
#ifdef CONFIG_SMP
	if (rq->rt.rt_nr_running) {
		struct rt_prio_array *array;

		WARN_ON(p->prio < rq->rt.highest_prio);
		if (p->prio == rq->rt.highest_prio) {
			/* recalculate */
			array = &rq->rt.active;
			rq->rt.highest_prio =
				sched_find_first_bit(array->bitmap);
		} /* otherwise leave rq->rt.highest_prio alone */
	} else
		rq->rt.highest_prio = MAX_RT_PRIO;
	if (p->nr_cpus_allowed > 1)
		rq->rt.rt_nr_migratory--;

	update_rt_migration(rq);
#endif /* CONFIG_SMP */
}

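/*
 * Add 'p' to the tail of its priority list and update the RT accounting.
 */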
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	inc_cpu_load(rq, p->se.load.weight);

	inc_rt_tasks(p, rq);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
	dec_cpu_load(rq, p->se.load.weight);

	dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

static void
yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the task will not preempt the RQ, try to find a better RQ
	 * before we even activate the task
	 */
	if ((p->prio >= rq->rt.highest_prio)
	    && (p->nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

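/*
 * Return 1 if 'p' is a candidate for migration: it must not be running on
 * its rq, it must allow the target cpu (any cpu when cpu < 0), and it must
 * not be pinned to a single CPU.
 */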
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq,
						      int cpu)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	assert_spin_locked(&rq->lock);

	if (likely(rq->rt.rt_nr_running < 2))
		return NULL;

	idx = sched_find_first_bit(array->bitmap);
	if (unlikely(idx >= MAX_RT_PRIO)) {
		WARN_ON(1); /* rt_nr_running is bad */
		return NULL;
	}

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	next = list_entry(queue->next, struct task_struct, run_list);
	if (unlikely(pick_rt_task(rq, next, cpu)))
		goto out;

	if (queue->next->next != queue) {
		/* same prio task */
		next = list_entry(queue->next->next, struct task_struct, run_list);
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

 retry:
	/* slower, but more flexible */
	idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
	if (unlikely(idx >= MAX_RT_PRIO))
		return NULL;

	queue = array->queue + idx;
	BUG_ON(list_empty(queue));

	list_for_each_entry(next, queue, run_list) {
		if (pick_rt_task(rq, next, cpu))
			goto out;
	}

	goto retry;

 out:
	return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);

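/*
 * Build the mask of CPUs, among those the task is allowed on, whose
 * runqueues carry the lowest RT priority. A CPU running no RT task at all
 * is taken immediately. Returns 1 if any candidate was found.
 */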
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
	int cpu;
	cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
	int lowest_prio = -1;
	int ret = 0;

	cpus_clear(*lowest_mask);
	cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);

	/*
	 * Scan each rq for the lowest prio.
	 */
	for_each_cpu_mask(cpu, *valid_mask) {
		struct rq *rq = cpu_rq(cpu);

		/* We look for lowest RT prio or non-rt CPU */
		if (rq->rt.highest_prio >= MAX_RT_PRIO) {
			if (ret)
				cpus_clear(*lowest_mask);
			cpu_set(rq->cpu, *lowest_mask);
			return 1;
		}

		/* no locking for now */
		if ((rq->rt.highest_prio > task->prio)
		    && (rq->rt.highest_prio >= lowest_prio)) {
			if (rq->rt.highest_prio > lowest_prio) {
				/* new low - clear old data */
				lowest_prio = rq->rt.highest_prio;
				cpus_clear(*lowest_mask);
			}
			cpu_set(rq->cpu, *lowest_mask);
			ret = 1;
		}
	}

	return ret;
}

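/*
 * Pick a CPU out of 'mask', preferring this_cpu when it qualifies;
 * returns -1 if the mask is empty.
 */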
static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
	int first;

	/* "this_cpu" is cheaper to preempt than a remote processor */
	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
		return this_cpu;

	first = first_cpu(*mask);
	if (first != NR_CPUS)
		return first;

	return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (!find_lowest_cpus(task, lowest_mask))
		return -1;

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpu_isset(cpu, *lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (this_cpu == cpu)
		this_cpu = -1; /* Skip this_cpu opt if the same */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			cpumask_t domain_mask;
			int best_cpu;

			cpus_and(domain_mask, sd->span, *lowest_mask);

			best_cpu = pick_optimal_cpu(this_cpu,
						    &domain_mask);
			if (best_cpu != -1)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task,
				      struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int cpu;
	int tries;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu, task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {
				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		spin_unlock(&lowest_rq->lock);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	assert_spin_locked(&rq->lock);

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	assert_spin_locked(&lowest_rq->lock);

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	spin_unlock(&lowest_rq->lock);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 * the queue, and stop when it can't migrate (or there's
 * no more RT tasks). There may be a case where a lower
 * priority RT task has a different affinity than the
 * higher RT task. In this case the lower RT task could
 * possibly be able to migrate whereas the higher priority
 * RT task could not. We currently ignore this issue.
 * Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

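/*
 * Pull RT tasks towards this_rq from overloaded runqueues whose queued
 * RT tasks preempt the one this_rq is about to run. Returns 1 if a task
 * was pulled in or if this_rq's next task may have changed while its
 * lock was dropped.
 */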
static int pull_rt_task(struct rq *this_rq)
{
	struct task_struct *next;
	struct task_struct *p;
	struct rq *src_rq;
	cpumask_t *rto_cpumask;
	int this_cpu = this_rq->cpu;
	int cpu;
	int ret = 0;

	assert_spin_locked(&this_rq->lock);

	/*
	 * If cpusets are used, and we have overlapping
	 * run queue cpusets, then this algorithm may not catch all.
	 * This is just the price you pay for trying to keep
	 * cache dirtying down on large SMP machines.
	 */
	if (likely(!rt_overloaded()))
		return 0;

	next = pick_next_task_rt(this_rq);

	rto_cpumask = rt_overload();

	for_each_cpu_mask(cpu, *rto_cpumask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		if (unlikely(src_rq->rt.rt_nr_running <= 1)) {
			/*
			 * It is possible that overlapping cpusets
			 * will miss clearing a non overloaded runqueue.
			 * Clear it now.
			 */
			if (double_lock_balance(this_rq, src_rq)) {
				/* unlocked our runqueue lock */
				struct task_struct *old_next = next;
				next = pick_next_task_rt(this_rq);
				if (next != old_next)
					ret = 1;
			}
			if (likely(src_rq->rt.rt_nr_running <= 1))
				/*
				 * Small chance that this_rq->curr changed
				 * but it's really harmless here.
				 */
				rt_clear_overload(this_rq);
			else
				/*
				 * Heh, the src_rq is now overloaded, since
				 * we already have the src_rq lock, go straight
				 * to pulling tasks from it.
				 */
				goto try_pulling;
			spin_unlock(&src_rq->lock);
			continue;
		}

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;
			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1) {
			spin_unlock(&src_rq->lock);
			continue;
		}

 try_pulling:
		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq's next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto bail;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */

			/*
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
 bail:
		spin_unlock(&src_rq->lock);
	}

	return ret;
}

static void schedule_balance_rt(struct rq *rq,
				struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) &&
	    rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void schedule_tail_balance_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUS.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.rt_nr_running > 1)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

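/*
 * A task has just been woken on this rq: if it is an RT task that is not
 * running and will not preempt the current task, try to push it (or other
 * queued RT tasks) off to another CPU.
 */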
static void wakeup_balance_rt(struct rq *rq, struct task_struct *p)
{
	if (unlikely(rt_task(p)) &&
	    !task_running(rq, p) &&
	    (p->prio >= rq->curr->prio))
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->nr_cpus_allowed <= 1) && (weight > 1))
			rq->rt.rt_nr_migratory++;
		else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed = *new_mask;
	p->nr_cpus_allowed = weight;
}
#else /* CONFIG_SMP */
# define schedule_tail_balance_rt(rq) do { } while (0)
# define schedule_balance_rt(rq, prev) do { } while (0)
# define wakeup_balance_rt(rq, p) do { } while (0)
#endif /* CONFIG_SMP */

static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->run_list.prev != p->run_list.next) {
		requeue_task_rt(rq, p);
		set_tsk_need_resched(p);
	}
}

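/*
 * Start runtime accounting for the task that is now current on this rq:
 * reset its exec_start so update_curr_rt() measures from this point on.
 */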
static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
	.next = &fair_sched_class,
	.enqueue_task = enqueue_task_rt,
	.dequeue_task = dequeue_task_rt,
	.yield_task = yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq = select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr = check_preempt_curr_rt,

	.pick_next_task = pick_next_task_rt,
	.put_prev_task = put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance = load_balance_rt,
	.move_one_task = move_one_task_rt,
	.set_cpus_allowed = set_cpus_allowed_rt,
#endif

	.set_curr_task = set_curr_task_rt,
	.task_tick = task_tick_rt,
};