/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        cpu_set(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpu_clear(rq->cpu, rq->rd->rto_mask);
}

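/*
 * Track whether this runqueue has pushable RT tasks: a runqueue is
 * "overloaded" when it has more than one runnable RT task and at least
 * one of them can migrate. The rd->rto_mask/rto_count pair lets other
 * CPUs find overloaded runqueues to pull from.
 */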
static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                if (!rq->rt.overloaded) {
                        rt_set_overload(rq);
                        rq->rt.overloaded = 1;
                }
        } else if (rq->rt.overloaded) {
                rt_clear_overload(rq);
                rq->rt.overloaded = 0;
        }
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

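/*
 * The accessors below come in two flavours: with CONFIG_RT_GROUP_SCHED,
 * rt_rq's hang off task groups and sched_rt_entities form a hierarchy;
 * without it, there is exactly one rt_rq per CPU runqueue and the
 * "hierarchy" collapses to a single level.
 */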
#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
                struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

                enqueue_rt_entity(rt_se);
                if (rt_rq->highest_prio < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se = rt_rq->rt_se;

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_online_map;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline cpumask_t sched_rt_period_mask(void)
{
        return cpu_online_map;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif

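/*
 * Called from the rt period timer: replenish each rt_rq's consumed
 * budget (rt_time) and re-enqueue it if it was throttled and can run
 * again. Returns 1 when every rt_rq was idle, so the caller can let
 * the timer lapse.
 */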
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1;
        cpumask_t span;

        if (rt_b->rt_runtime == RUNTIME_INF)
                return 1;

        span = sched_rt_period_mask();
        for_each_cpu_mask(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        spin_lock(&rt_rq->rt_runtime_lock);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        spin_unlock(&rt_rq->rt_runtime_lock);
                }

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                spin_unlock(&rq->lock);
        }

        return idle;
}

#ifdef CONFIG_SMP
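/*
 * Borrow runtime from the other CPUs in the root domain when this
 * rt_rq has exhausted its own budget: each donor gives up at most an
 * equal share (diff/weight) of its spare time, and the borrower never
 * accumulates more than a full period.
 */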
static int balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpus_weight(rd->span);

        spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu_mask(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                spin_lock(&iter->rt_runtime_lock);
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        do_div(diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
                spin_unlock(&iter->rt_runtime_lock);
        }
        spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}
#endif

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio;
#endif

        return rt_task_of(rt_se)->prio;
}

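/*
 * Check whether this rt_rq has consumed its budget for the current
 * period; called with rt_rq->rt_runtime_lock held (it is dropped and
 * retaken around balance_runtime()). Returns 1 when the rt_rq has been
 * throttled and dequeued.
 */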
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
                return 0;

#ifdef CONFIG_SMP
        if (rt_rq->rt_time > runtime) {
                int more;

                spin_unlock(&rt_rq->rt_runtime_lock);
                more = balance_runtime(rt_rq);
                spin_lock(&rt_rq->rt_runtime_lock);

                if (more)
                        runtime = sched_rt_runtime(rt_rq);
        }
#endif

        if (rt_rq->rt_time > runtime) {
                rt_rq->rt_throttled = 1;
                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_time += delta_exec;
                if (sched_rt_runtime_exceeded(rt_rq))
                        resched_task(curr);
                spin_unlock(&rt_rq->rt_runtime_lock);
        }
}

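/*
 * Accounting for an entity entering an rt_rq: maintain the running
 * count, the cached highest priority, the migration statistics and,
 * under group scheduling, the boosted count and the bandwidth timer.
 */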
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio)
                rt_rq->highest_prio = rt_se_prio(rt_se);
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory++;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
        start_rt_bandwidth(&def_rt_bandwidth);
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
                if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
                        /* recalculate */
                        array = &rt_rq->active;
                        rt_rq->highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rt_rq->highest_prio alone */
        } else
                rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);
                rq->rt.rt_nr_migratory--;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);

        if (group_rq && rt_rq_throttled(group_rq))
                return;

        list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct task_struct *p)
{
        struct sched_rt_entity *rt_se, *back = NULL;

        rt_se = &p->rt;
        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        dequeue_rt_entity(rt_se);
        }
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (wakeup)
                rt_se->timeout = 0;

        dequeue_rt_stack(p);

        /*
         * enqueue everybody, bottom - up.
         */
        for_each_sched_rt_entity(rt_se)
                enqueue_rt_entity(rt_se);

        inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        update_curr_rt(rq);

        dequeue_rt_stack(p);

        /*
         * re-enqueue all non-empty rt_rq entities.
         */
        for_each_sched_rt_entity(rt_se) {
                rt_rq = group_rt_rq(rt_se);
                if (rt_rq && rt_rq->rt_nr_running)
                        enqueue_rt_entity(rt_se);
        }

        dec_cpu_load(rq, p->se.load.weight);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static
void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
        struct rt_prio_array *array = &rt_rq->active;

        list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
        struct rq *rq = task_rq(p);

        /*
         * If the current task is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues, even if
         * the woken RT task is of higher priority than the
         * current RT task. RT tasks behave differently than
         * other tasks: if one gets preempted, we try to push
         * it off to another queue. So trying to keep a
         * preempting RT task on the same cache hot CPU will
         * force the running RT task to a cold CPU. We would
         * waste all the cache of the lower prio RT task in
         * hopes of saving some for an RT task that is just
         * being woken and probably will have a cold cache
         * anyway.
         */
        if (unlikely(rt_task(rq->curr)) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);

                return (cpu == -1) ? task_cpu(p) : cpu;
        }

        /*
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away.
         */
        return task_cpu(p);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

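/*
 * Pick the highest-priority entity on an rt_rq: the first set bit in
 * the priority bitmap indexes the head of the queue to take from.
 * pick_next_task_rt() below walks down through group entities until it
 * reaches an actual task.
 */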
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (unlikely(!rt_rq->rt_nr_running))
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;
        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

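/*
 * A task is a candidate for pushing/pulling towards 'cpu' if it is not
 * currently running, is allowed to run there (cpu < 0 means "anywhere"),
 * and may run on more than one CPU.
 */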
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->rt.nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
 next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio < idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

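/*
 * Build the mask of CPUs this task could preempt: start from the
 * allowed, online CPUs and keep only those whose runqueues run at the
 * lowest RT priority level found (or no RT task at all). Returns the
 * number of candidate CPUs left in the mask.
 */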
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
        int lowest_prio = -1;
        int lowest_cpu = -1;
        int count = 0;
        int cpu;

        cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);

        /*
         * Scan each rq for the lowest prio.
         */
        for_each_cpu_mask(cpu, *lowest_mask) {
                struct rq *rq = cpu_rq(cpu);

                /* We look for lowest RT prio or non-rt CPU */
                if (rq->rt.highest_prio >= MAX_RT_PRIO) {
                        /*
                         * if we already found a low RT queue
                         * and now we found this non-rt queue
                         * clear the mask and set our bit.
                         * Otherwise just return the queue as is
                         * and the count==1 will cause the algorithm
                         * to use the first bit found.
                         */
                        if (lowest_cpu != -1) {
                                cpus_clear(*lowest_mask);
                                cpu_set(rq->cpu, *lowest_mask);
                        }
                        return 1;
                }

                /* no locking for now */
                if ((rq->rt.highest_prio > task->prio)
                    && (rq->rt.highest_prio >= lowest_prio)) {
                        if (rq->rt.highest_prio > lowest_prio) {
                                /* new low - clear old data */
                                lowest_prio = rq->rt.highest_prio;
                                lowest_cpu = cpu;
                                count = 0;
                        }
                        count++;
                } else
                        cpu_clear(cpu, *lowest_mask);
        }

        /*
         * Clear out all the set bits that represent
         * runqueues that were of higher prio than
         * the lowest_prio.
         */
        if (lowest_cpu > 0) {
                /*
                 * Perhaps we could add another cpumask op to
                 * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
                 * Then that could be optimized to use memset and such.
                 */
                for_each_cpu_mask(cpu, *lowest_mask) {
                        if (cpu >= lowest_cpu)
                                break;
                        cpu_clear(cpu, *lowest_mask);
                }
        }

        return count;
}

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
        int first;

        /* "this_cpu" is cheaper to preempt than a remote processor */
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
                return this_cpu;

        first = first_cpu(*mask);
        if (first != NR_CPUS)
                return first;

        return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu = task_cpu(task);
        int count = find_lowest_cpus(task, lowest_mask);

        if (!count)
                return -1; /* No targets found */

        /*
         * There is no sense in performing an optimal search if only one
         * target is found.
         */
        if (count == 1)
                return first_cpu(*lowest_mask);

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system. Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpu_isset(cpu, *lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        cpumask_t domain_mask;
                        int best_cpu;

                        cpus_and(domain_mask, sd->span, *lowest_mask);

                        best_cpu = pick_optimal_cpu(this_cpu,
                                                    &domain_mask);
                        if (best_cpu != -1)
                                return best_cpu;
                }
        }

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the meantime, task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpu_isset(lowest_rq->cpu,
                                                task->cpus_allowed) ||
                                     task_running(rq, task) ||
                                     !task->se.on_rq)) {

                                spin_unlock(&lowest_rq->lock);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio > task->prio)
                        break;

                /* try again */
                spin_unlock(&lowest_rq->lock);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;
        int paranoid = RT_MAX_TRIES;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in with a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has changed.
                 * If it has, then try again.
                 */
                task = pick_next_highest_task_rt(rq, -1);
                if (unlikely(task != next_task) && task && paranoid--) {
                        put_task_struct(next_task);
                        next_task = task;
                        goto retry;
                }
                goto out;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);

        resched_task(lowest_rq->curr);

        spin_unlock(&lowest_rq->lock);

        ret = 1;
out:
        put_task_struct(next_task);

        return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there are
 *       no more RT tasks). There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not. We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT */
        while (push_rt_task(rq))
                ;
}

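/*
 * Pull higher-priority RT tasks from overloaded runqueues onto this
 * one. Called with this_rq->lock held; returns nonzero when a task was
 * pulled or when our cached next-task choice may have changed.
 */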
static int pull_rt_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, ret = 0, cpu;
        struct task_struct *p, *next;
        struct rq *src_rq;

        if (likely(!rt_overloaded(this_rq)))
                return 0;

        next = pick_next_task_rt(this_rq);

        for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);
                /*
                 * We can potentially drop this_rq's lock in
                 * double_lock_balance, and another CPU could
                 * steal our next task - hence we must cause
                 * the caller to recalculate the next task
                 * in that case:
                 */
                if (double_lock_balance(this_rq, src_rq)) {
                        struct task_struct *old_next = next;

                        next = pick_next_task_rt(this_rq);
                        if (next != old_next)
                                ret = 1;
                }

                /*
                 * Are there still pullable RT tasks?
                 */
                if (src_rq->rt.rt_nr_running <= 1)
                        goto skip;

                p = pick_next_highest_task_rt(src_rq, this_cpu);

                /*
                 * Do we have an RT task that preempts
                 * the to-be-scheduled task?
                 */
                if (p && (!next || (p->prio < next->prio))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->se.on_rq);

                        /*
                         * There's a chance that p is higher in priority
                         * than what's currently running on its cpu.
                         * This is just that p is waking up and hasn't
                         * had a chance to schedule. We only pull
                         * p if it is lower in priority than the
                         * current task on the run queue or
                         * this_rq's next task is lower in prio than
                         * the current task on that rq.
                         */
                        if (p->prio < src_rq->curr->prio ||
                            (next && next->prio < src_rq->curr->prio))
                                goto skip;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        /*
                         * We continue with the search, just in
                         * case there's an even higher prio task
                         * in another runqueue. (low likelihood
                         * but possible)
                         *
                         * Update next so that we won't pick a task
                         * on another cpu with a priority lower (or equal)
                         * than the one we just picked.
                         */
                        next = p;

                }
 skip:
                spin_unlock(&src_rq->lock);
        }

        return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
        /* Try to pull RT tasks here if we lower this rq's prio */
        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
                pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
        /*
         * If we have more than one rt_task queued, then
         * see if we can push the other rt_tasks off to other CPUS.
         * Note we may release the rq lock, and since
         * the lock was owned by prev, we need to release it
         * first via finish_lock_switch and then reacquire it here.
         */
        if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
}

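/*
 * RT tasks are balanced entirely by the push/pull machinery above, so
 * the generic load-balancer callbacks intentionally do nothing here.
 */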
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        /* don't touch RT tasks */
        return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        /* don't touch RT tasks */
        return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p,
                                const cpumask_t *new_mask)
{
        int weight = cpus_weight(*new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Update the migration status of the RQ if we have an RT task
         * which is running AND changing its weight value.
         */
        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        p->cpus_allowed = *new_mask;
        p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void join_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);
}

/* Assumes rq->lock is held */
static void leave_domain_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_clear_overload(rq);
}

/*
 * When we switch from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
                             int running)
{
        /*
         * If there are other RT tasks then we will reschedule
         * and the scheduling of the other RT tasks will handle
         * the balancing. But if we are the last RT task
         * we may need to handle the pulling of RT tasks
         * now.
         */
        if (!rq->rt.rt_nr_running)
                pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
                           int running)
{
        int check_resched = 1;

        /*
         * If we are already running, then there's nothing
         * that needs to be done. But if we are not running
         * we may need to preempt the current running task.
         * If that current running task is also an RT task
         * then see if we can move to another run queue.
         */
        if (!running) {
#ifdef CONFIG_SMP
                if (rq->rt.overloaded && push_rt_task(rq) &&
                    /* Don't resched if we changed runqueues */
                    rq != task_rq(p))
                        check_resched = 0;
#endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                            int oldprio, int running)
{
        if (running) {
#ifdef CONFIG_SMP
                /*
                 * If our priority decreases while running, we
                 * may need to pull tasks to this runqueue.
                 */
                if (oldprio < p->prio)
                        pull_rt_task(rq);
                /*
                 * If there's a higher priority task waiting to run
                 * then reschedule. Note, the above pull_rt_task
                 * can release the rq lock and p could migrate.
                 * Only reschedule if p is still on the same runqueue.
                 */
                if (p->prio > rq->rt.highest_prio && rq->curr == p)
                        resched_task(p);
#else
                /* For UP simply resched on drop of prio */
                if (oldprio < p->prio)
                        resched_task(p);
#endif /* CONFIG_SMP */
        } else {
                /*
                 * This task is not running, but if it is
                 * greater than the current running task
                 * then reschedule.
                 */
                if (p->prio < rq->curr->prio)
                        resched_task(rq->curr);
        }
}

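/*
 * RLIMIT_RTTIME watchdog: count the ticks this task has run and, once
 * the soft limit is exceeded, pull it_sched_expires forward so the
 * overrun gets noticed on a subsequent tick.
 */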
static void watchdog(struct rq *rq, struct task_struct *p)
{
        unsigned long soft, hard;

        if (!p->signal)
                return;

        soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
        hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

        if (soft != RLIM_INFINITY) {
                unsigned long next;

                p->rt.timeout++;
                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                if (p->rt.timeout > next)
                        p->it_sched_expires = p->se.sum_exec_runtime;
        }
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_rt(rq);

        watchdog(rq, p);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->rt.time_slice)
                return;

        p->rt.time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

static const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,
#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_rt,
#endif /* CONFIG_SMP */

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .join_domain            = join_domain_rt,
        .leave_domain           = leave_domain_rt,
        .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_wake_up           = task_wake_up_rt,
        .switched_from          = switched_from_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,

        .prio_changed           = prio_changed_rt,
        .switched_to            = switched_to_rt,
};