sched: fix the cpuprio count really
[net-next-2.6.git] / kernel / sched_rt.c
bb44e5d1
IM
1/*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
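/*
 * For context (not part of this file): tasks enter this class from
 * userspace via sched_setscheduler().  A minimal sketch, with an
 * illustrative priority value and no error handling beyond perror():
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 50 };

	/* Needs CAP_SYS_NICE (or a suitable RLIMIT_RTPRIO); pid 0 == self. */
	if (sched_setscheduler(0, SCHED_FIFO, &param) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	/* From here on the task is handled by the code in this file. */
	return 0;
}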
5
4fd29176 6#ifdef CONFIG_SMP
84de4274 7
637f5085 8static inline int rt_overloaded(struct rq *rq)
4fd29176 9{
637f5085 10 return atomic_read(&rq->rd->rto_count);
4fd29176 11}
84de4274 12
4fd29176
SR
13static inline void rt_set_overload(struct rq *rq)
14{
1f11eb6a
GH
15 if (!rq->online)
16 return;
17
637f5085 18 cpu_set(rq->cpu, rq->rd->rto_mask);
4fd29176
SR
19 /*
20 * Make sure the mask is visible before we set
21 * the overload count. That is checked to determine
22 * if we should look at the mask. It would be a shame
23 * if we looked at the mask, but the mask was not
24 * updated yet.
25 */
26 wmb();
637f5085 27 atomic_inc(&rq->rd->rto_count);
4fd29176 28}
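/*
 * The read side is pull_rt_task(): it checks rt_overloaded() (the
 * rto_count) and only then walks rd->rto_mask, so the wmb() above
 * orders the mask update before the count increment and a CPU that
 * sees a non-zero count also sees the updated mask.
 */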
84de4274 29
4fd29176
SR
30static inline void rt_clear_overload(struct rq *rq)
31{
1f11eb6a
GH
32 if (!rq->online)
33 return;
34
4fd29176 35 /* the order here really doesn't matter */
637f5085
GH
36 atomic_dec(&rq->rd->rto_count);
37 cpu_clear(rq->cpu, rq->rd->rto_mask);
4fd29176 38}
73fe6aae
GH
39
40static void update_rt_migration(struct rq *rq)
41{
637f5085 42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
cdc8eb98
GH
43 if (!rq->rt.overloaded) {
44 rt_set_overload(rq);
45 rq->rt.overloaded = 1;
46 }
47 } else if (rq->rt.overloaded) {
73fe6aae 48 rt_clear_overload(rq);
637f5085
GH
49 rq->rt.overloaded = 0;
50 }
73fe6aae 51}
4fd29176
SR
52#endif /* CONFIG_SMP */
53
6f505b16 54static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
fa85ae24 55{
6f505b16
PZ
56 return container_of(rt_se, struct task_struct, rt);
57}
58
59static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60{
61 return !list_empty(&rt_se->run_list);
62}
63
052f1dc7 64#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 65
9f0c1e56 66static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
6f505b16
PZ
67{
68 if (!rt_rq->tg)
9f0c1e56 69 return RUNTIME_INF;
6f505b16 70
ac086bc2
PZ
71 return rt_rq->rt_runtime;
72}
73
74static inline u64 sched_rt_period(struct rt_rq *rt_rq)
75{
76 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
6f505b16
PZ
77}
78
79#define for_each_leaf_rt_rq(rt_rq, rq) \
80 list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81
82static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83{
84 return rt_rq->rq;
85}
86
87static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88{
89 return rt_se->rt_rq;
90}
91
92#define for_each_sched_rt_entity(rt_se) \
93 for (; rt_se; rt_se = rt_se->parent)
94
95static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
96{
97 return rt_se->my_q;
98}
99
100static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
101static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
102
9f0c1e56 103static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b16
PZ
104{
105 struct sched_rt_entity *rt_se = rt_rq->rt_se;
106
107 if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
1020387f
PZ
108 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
109
6f505b16 110 enqueue_rt_entity(rt_se);
1020387f
PZ
111 if (rt_rq->highest_prio < curr->prio)
112 resched_task(curr);
6f505b16
PZ
113 }
114}
115
9f0c1e56 116static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b16
PZ
117{
118 struct sched_rt_entity *rt_se = rt_rq->rt_se;
119
120 if (rt_se && on_rt_rq(rt_se))
121 dequeue_rt_entity(rt_se);
122}
123
23b0fdfc
PZ
124static inline int rt_rq_throttled(struct rt_rq *rt_rq)
125{
126 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
127}
128
129static int rt_se_boosted(struct sched_rt_entity *rt_se)
130{
131 struct rt_rq *rt_rq = group_rt_rq(rt_se);
132 struct task_struct *p;
133
134 if (rt_rq)
135 return !!rt_rq->rt_nr_boosted;
136
137 p = rt_task_of(rt_se);
138 return p->prio != p->normal_prio;
139}
140
d0b27fa7
PZ
141#ifdef CONFIG_SMP
142static inline cpumask_t sched_rt_period_mask(void)
143{
144 return cpu_rq(smp_processor_id())->rd->span;
145}
6f505b16 146#else
d0b27fa7
PZ
147static inline cpumask_t sched_rt_period_mask(void)
148{
149 return cpu_online_map;
150}
151#endif
6f505b16 152
d0b27fa7
PZ
153static inline
154struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
6f505b16 155{
d0b27fa7
PZ
156 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
157}
9f0c1e56 158
ac086bc2
PZ
159static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
160{
161 return &rt_rq->tg->rt_bandwidth;
162}
163
d0b27fa7
PZ
164#else
165
166static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
167{
ac086bc2
PZ
168 return rt_rq->rt_runtime;
169}
170
171static inline u64 sched_rt_period(struct rt_rq *rt_rq)
172{
173 return ktime_to_ns(def_rt_bandwidth.rt_period);
6f505b16
PZ
174}
175
176#define for_each_leaf_rt_rq(rt_rq, rq) \
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178
179static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180{
181 return container_of(rt_rq, struct rq, rt);
182}
183
184static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185{
186 struct task_struct *p = rt_task_of(rt_se);
187 struct rq *rq = task_rq(p);
188
189 return &rq->rt;
190}
191
192#define for_each_sched_rt_entity(rt_se) \
193 for (; rt_se; rt_se = NULL)
194
195static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
196{
197 return NULL;
198}
199
9f0c1e56 200static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b16
PZ
201{
202}
203
9f0c1e56 204static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b16
PZ
205{
206}
207
23b0fdfc
PZ
208static inline int rt_rq_throttled(struct rt_rq *rt_rq)
209{
210 return rt_rq->rt_throttled;
211}
d0b27fa7
PZ
212
213static inline cpumask_t sched_rt_period_mask(void)
214{
215 return cpu_online_map;
216}
217
218static inline
219struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
220{
221 return &cpu_rq(cpu)->rt;
222}
223
ac086bc2
PZ
224static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
225{
226 return &def_rt_bandwidth;
227}
228
6f505b16
PZ
229#endif
230
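/*
 * Driven by the rt_bandwidth period timer: for each rt_rq in the
 * period mask, pay back up to 'overrun' periods worth of accumulated
 * rt_time, lift the throttle once rt_time has dropped below
 * rt_runtime, and re-enqueue the group.  Returns 1 only if every
 * rt_rq visited was idle.
 */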
d0b27fa7
PZ
231static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
232{
233 int i, idle = 1;
234 cpumask_t span;
235
236 if (rt_b->rt_runtime == RUNTIME_INF)
237 return 1;
238
239 span = sched_rt_period_mask();
240 for_each_cpu_mask(i, span) {
241 int enqueue = 0;
242 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
243 struct rq *rq = rq_of_rt_rq(rt_rq);
244
245 spin_lock(&rq->lock);
246 if (rt_rq->rt_time) {
ac086bc2 247 u64 runtime;
d0b27fa7 248
ac086bc2
PZ
249 spin_lock(&rt_rq->rt_runtime_lock);
250 runtime = rt_rq->rt_runtime;
d0b27fa7
PZ
251 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
252 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
253 rt_rq->rt_throttled = 0;
254 enqueue = 1;
255 }
256 if (rt_rq->rt_time || rt_rq->rt_nr_running)
257 idle = 0;
ac086bc2 258 spin_unlock(&rt_rq->rt_runtime_lock);
d0b27fa7
PZ
259 }
260
261 if (enqueue)
262 sched_rt_rq_enqueue(rt_rq);
263 spin_unlock(&rq->lock);
264 }
265
266 return idle;
267}
268
ac086bc2
PZ
269#ifdef CONFIG_SMP
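/*
 * This rt_rq has run out of runtime: try to borrow spare rt_runtime
 * from the other rt_rqs in the root domain, taking at most an equal
 * share (diff / weight) from each and never growing past rt_period in
 * total.  Returns 1 if any runtime could be moved over.
 */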
270static int balance_runtime(struct rt_rq *rt_rq)
271{
272 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
273 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
274 int i, weight, more = 0;
275 u64 rt_period;
276
277 weight = cpus_weight(rd->span);
278
279 spin_lock(&rt_b->rt_runtime_lock);
280 rt_period = ktime_to_ns(rt_b->rt_period);
281 for_each_cpu_mask(i, rd->span) {
282 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
283 s64 diff;
284
285 if (iter == rt_rq)
286 continue;
287
288 spin_lock(&iter->rt_runtime_lock);
289 diff = iter->rt_runtime - iter->rt_time;
290 if (diff > 0) {
291 do_div(diff, weight);
292 if (rt_rq->rt_runtime + diff > rt_period)
293 diff = rt_period - rt_rq->rt_runtime;
294 iter->rt_runtime -= diff;
295 rt_rq->rt_runtime += diff;
296 more = 1;
297 if (rt_rq->rt_runtime == rt_period) {
298 spin_unlock(&iter->rt_runtime_lock);
299 break;
300 }
301 }
302 spin_unlock(&iter->rt_runtime_lock);
303 }
304 spin_unlock(&rt_b->rt_runtime_lock);
305
306 return more;
307}
308#endif
309
6f505b16
PZ
310static inline int rt_se_prio(struct sched_rt_entity *rt_se)
311{
052f1dc7 312#ifdef CONFIG_RT_GROUP_SCHED
6f505b16
PZ
313 struct rt_rq *rt_rq = group_rt_rq(rt_se);
314
315 if (rt_rq)
316 return rt_rq->highest_prio;
317#endif
318
319 return rt_task_of(rt_se)->prio;
320}
321
9f0c1e56 322static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
6f505b16 323{
9f0c1e56 324 u64 runtime = sched_rt_runtime(rt_rq);
fa85ae24 325
9f0c1e56 326 if (runtime == RUNTIME_INF)
fa85ae24
PZ
327 return 0;
328
329 if (rt_rq->rt_throttled)
23b0fdfc 330 return rt_rq_throttled(rt_rq);
fa85ae24 331
ac086bc2
PZ
332 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
333 return 0;
334
335#ifdef CONFIG_SMP
336 if (rt_rq->rt_time > runtime) {
337 int more;
338
339 spin_unlock(&rt_rq->rt_runtime_lock);
340 more = balance_runtime(rt_rq);
341 spin_lock(&rt_rq->rt_runtime_lock);
342
343 if (more)
344 runtime = sched_rt_runtime(rt_rq);
345 }
346#endif
347
9f0c1e56 348 if (rt_rq->rt_time > runtime) {
6f505b16 349 rt_rq->rt_throttled = 1;
23b0fdfc 350 if (rt_rq_throttled(rt_rq)) {
9f0c1e56 351 sched_rt_rq_dequeue(rt_rq);
23b0fdfc
PZ
352 return 1;
353 }
fa85ae24
PZ
354 }
355
356 return 0;
357}
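/*
 * Worked example for the !CONFIG_RT_GROUP_SCHED case with the default
 * knobs (/proc/sys/kernel/sched_rt_period_us = 1000000,
 * sched_rt_runtime_us = 950000): once the rt_rq has accumulated 950ms
 * of rt_time within the 1s period (ignoring runtime borrowed via
 * balance_runtime()), it is throttled and dequeued, leaving the
 * remaining 50ms to non-RT tasks until do_sched_rt_period_timer()
 * refreshes the budget.
 */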
358
bb44e5d1
IM
359/*
360 * Update the current task's runtime statistics. Skip current tasks that
361 * are not in our scheduling class.
362 */
a9957449 363static void update_curr_rt(struct rq *rq)
bb44e5d1
IM
364{
365 struct task_struct *curr = rq->curr;
6f505b16
PZ
366 struct sched_rt_entity *rt_se = &curr->rt;
367 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
bb44e5d1
IM
368 u64 delta_exec;
369
370 if (!task_has_rt_policy(curr))
371 return;
372
d281918d 373 delta_exec = rq->clock - curr->se.exec_start;
bb44e5d1
IM
374 if (unlikely((s64)delta_exec < 0))
375 delta_exec = 0;
6cfb0d5d
IM
376
377 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
bb44e5d1
IM
378
379 curr->se.sum_exec_runtime += delta_exec;
d281918d 380 curr->se.exec_start = rq->clock;
d842de87 381 cpuacct_charge(curr, delta_exec);
fa85ae24 382
354d60c2
DG
383 for_each_sched_rt_entity(rt_se) {
384 rt_rq = rt_rq_of_se(rt_se);
385
386 spin_lock(&rt_rq->rt_runtime_lock);
387 rt_rq->rt_time += delta_exec;
388 if (sched_rt_runtime_exceeded(rt_rq))
389 resched_task(curr);
390 spin_unlock(&rt_rq->rt_runtime_lock);
391 }
bb44e5d1
IM
392}
393
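/*
 * inc_rt_tasks()/dec_rt_tasks() do the bookkeeping for an rt entity
 * entering or leaving a runqueue: rt_nr_running, the per-rq
 * highest_prio (mirrored into cpupri), the migratory count feeding the
 * overload logic, and the group-scheduling boost/bandwidth state.
 */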
6f505b16
PZ
394static inline
395void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
63489e45 396{
6f505b16
PZ
397 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
398 rt_rq->rt_nr_running++;
052f1dc7 399#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
6e0534f2
GH
400 if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
401 struct rq *rq = rq_of_rt_rq(rt_rq);
6f505b16 402 rt_rq->highest_prio = rt_se_prio(rt_se);
1f11eb6a
GH
403
404 if (rq->online)
405 cpupri_set(&rq->rd->cpupri, rq->cpu,
406 rt_se_prio(rt_se));
6e0534f2 407 }
6f505b16 408#endif
764a9d6f 409#ifdef CONFIG_SMP
6f505b16
PZ
410 if (rt_se->nr_cpus_allowed > 1) {
411 struct rq *rq = rq_of_rt_rq(rt_rq);
73fe6aae 412 rq->rt.rt_nr_migratory++;
6f505b16 413 }
73fe6aae 414
6f505b16
PZ
415 update_rt_migration(rq_of_rt_rq(rt_rq));
416#endif
052f1dc7 417#ifdef CONFIG_RT_GROUP_SCHED
23b0fdfc
PZ
418 if (rt_se_boosted(rt_se))
419 rt_rq->rt_nr_boosted++;
d0b27fa7
PZ
420
421 if (rt_rq->tg)
422 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
423#else
424 start_rt_bandwidth(&def_rt_bandwidth);
23b0fdfc 425#endif
63489e45
SR
426}
427
6f505b16
PZ
428static inline
429void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
63489e45 430{
6e0534f2
GH
431#ifdef CONFIG_SMP
432 int highest_prio = rt_rq->highest_prio;
433#endif
434
6f505b16
PZ
435 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
436 WARN_ON(!rt_rq->rt_nr_running);
437 rt_rq->rt_nr_running--;
052f1dc7 438#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
6f505b16 439 if (rt_rq->rt_nr_running) {
764a9d6f
SR
440 struct rt_prio_array *array;
441
6f505b16
PZ
442 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
443 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
764a9d6f 444 /* recalculate */
6f505b16
PZ
445 array = &rt_rq->active;
446 rt_rq->highest_prio =
764a9d6f
SR
447 sched_find_first_bit(array->bitmap);
448 } /* otherwise leave rq->highest_prio alone */
449 } else
6f505b16
PZ
450 rt_rq->highest_prio = MAX_RT_PRIO;
451#endif
452#ifdef CONFIG_SMP
453 if (rt_se->nr_cpus_allowed > 1) {
454 struct rq *rq = rq_of_rt_rq(rt_rq);
73fe6aae 455 rq->rt.rt_nr_migratory--;
6f505b16 456 }
73fe6aae 457
6e0534f2
GH
458 if (rt_rq->highest_prio != highest_prio) {
459 struct rq *rq = rq_of_rt_rq(rt_rq);
1f11eb6a
GH
460
461 if (rq->online)
462 cpupri_set(&rq->rd->cpupri, rq->cpu,
463 rt_rq->highest_prio);
6e0534f2
GH
464 }
465
6f505b16 466 update_rt_migration(rq_of_rt_rq(rt_rq));
764a9d6f 467#endif /* CONFIG_SMP */
052f1dc7 468#ifdef CONFIG_RT_GROUP_SCHED
23b0fdfc
PZ
469 if (rt_se_boosted(rt_se))
470 rt_rq->rt_nr_boosted--;
471
472 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
473#endif
63489e45
SR
474}
475
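/*
 * Entities pinned to a single CPU (nr_cpus_allowed == 1) are queued on
 * the exclusive queue for their priority, everything else on the
 * shared queue.  pick_next_rt_entity() scans the exclusive queue
 * first, preferring to run the pinned task locally while its
 * migratable peers can be pushed to other CPUs.
 */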
6f505b16 476static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
bb44e5d1 477{
6f505b16
PZ
478 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
479 struct rt_prio_array *array = &rt_rq->active;
480 struct rt_rq *group_rq = group_rt_rq(rt_se);
bb44e5d1 481
23b0fdfc 482 if (group_rq && rt_rq_throttled(group_rq))
6f505b16 483 return;
63489e45 484
45c01e82
GH
485 if (rt_se->nr_cpus_allowed == 1)
486 list_add_tail(&rt_se->run_list,
487 array->xqueue + rt_se_prio(rt_se));
488 else
489 list_add_tail(&rt_se->run_list,
490 array->squeue + rt_se_prio(rt_se));
491
6f505b16 492 __set_bit(rt_se_prio(rt_se), array->bitmap);
78f2c7db 493
6f505b16
PZ
494 inc_rt_tasks(rt_se, rt_rq);
495}
496
497static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
498{
499 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
500 struct rt_prio_array *array = &rt_rq->active;
501
502 list_del_init(&rt_se->run_list);
45c01e82
GH
503 if (list_empty(array->squeue + rt_se_prio(rt_se))
504 && list_empty(array->xqueue + rt_se_prio(rt_se)))
6f505b16
PZ
505 __clear_bit(rt_se_prio(rt_se), array->bitmap);
506
507 dec_rt_tasks(rt_se, rt_rq);
508}
509
510/*
511 * Because the prio of an upper entry depends on the lower
512 * entries, we must remove entries top-down.
6f505b16
PZ
513 */
514static void dequeue_rt_stack(struct task_struct *p)
515{
58d6c2d7 516 struct sched_rt_entity *rt_se, *back = NULL;
6f505b16 517
58d6c2d7
PZ
518 rt_se = &p->rt;
519 for_each_sched_rt_entity(rt_se) {
520 rt_se->back = back;
521 back = rt_se;
522 }
523
524 for (rt_se = back; rt_se; rt_se = rt_se->back) {
525 if (on_rt_rq(rt_se))
526 dequeue_rt_entity(rt_se);
527 }
bb44e5d1
IM
528}
529
530/*
531 * Adding/removing a task to/from a priority array:
532 */
6f505b16
PZ
533static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
534{
535 struct sched_rt_entity *rt_se = &p->rt;
536
537 if (wakeup)
538 rt_se->timeout = 0;
539
540 dequeue_rt_stack(p);
541
542 /*
543 * enqueue everybody, bottom-up.
544 */
545 for_each_sched_rt_entity(rt_se)
546 enqueue_rt_entity(rt_se);
6f505b16
PZ
547}
548
f02231e5 549static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
bb44e5d1 550{
6f505b16
PZ
551 struct sched_rt_entity *rt_se = &p->rt;
552 struct rt_rq *rt_rq;
bb44e5d1 553
f1e14ef6 554 update_curr_rt(rq);
bb44e5d1 555
6f505b16
PZ
556 dequeue_rt_stack(p);
557
558 /*
559 * re-enqueue all non-empty rt_rq entities.
560 */
561 for_each_sched_rt_entity(rt_se) {
562 rt_rq = group_rt_rq(rt_se);
563 if (rt_rq && rt_rq->rt_nr_running)
564 enqueue_rt_entity(rt_se);
565 }
bb44e5d1
IM
566}
567
568/*
569 * Put task to the end of the run list without the overhead of dequeue
570 * followed by enqueue.
45c01e82
GH
571 *
572 * Note: We always enqueue the task to the shared-queue, regardless of its
573 * previous position w.r.t. exclusive vs shared. This is so that exclusive RR
574 * tasks fairly round-robin with all tasks on the runqueue, not just other
575 * exclusive tasks.
bb44e5d1 576 */
6f505b16
PZ
577static
578void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
579{
580 struct rt_prio_array *array = &rt_rq->active;
581
45c01e82
GH
582 list_del_init(&rt_se->run_list);
583 list_add_tail(&rt_se->run_list, array->squeue + rt_se_prio(rt_se));
6f505b16
PZ
584}
585
bb44e5d1
IM
586static void requeue_task_rt(struct rq *rq, struct task_struct *p)
587{
6f505b16
PZ
588 struct sched_rt_entity *rt_se = &p->rt;
589 struct rt_rq *rt_rq;
bb44e5d1 590
6f505b16
PZ
591 for_each_sched_rt_entity(rt_se) {
592 rt_rq = rt_rq_of_se(rt_se);
593 requeue_rt_entity(rt_rq, rt_se);
594 }
bb44e5d1
IM
595}
596
6f505b16 597static void yield_task_rt(struct rq *rq)
bb44e5d1 598{
4530d7ab 599 requeue_task_rt(rq, rq->curr);
bb44e5d1
IM
600}
601
e7693a36 602#ifdef CONFIG_SMP
318e0893
GH
603static int find_lowest_rq(struct task_struct *task);
604
e7693a36
GH
605static int select_task_rq_rt(struct task_struct *p, int sync)
606{
318e0893
GH
607 struct rq *rq = task_rq(p);
608
609 /*
e1f47d89
SR
610 * If the current task is an RT task, then
611 * try to see if we can wake this RT task up on another
612 * runqueue. Otherwise simply start this RT task
613 * on its current runqueue.
614 *
615 * We want to avoid overloading runqueues, even if
616 * the RT task is of higher priority than the current RT task.
617 * RT tasks behave differently than other tasks. If
618 * one gets preempted, we try to push it off to another queue.
619 * So trying to keep a preempting RT task on the same
620 * cache hot CPU will force the running RT task to
621 * a cold CPU. So we waste all the cache for the lower
622 * RT task in hopes of saving some of an RT task
623 * that is just being woken and probably will have
624 * cold cache anyway.
318e0893 625 */
17b3279b 626 if (unlikely(rt_task(rq->curr)) &&
6f505b16 627 (p->rt.nr_cpus_allowed > 1)) {
318e0893
GH
628 int cpu = find_lowest_rq(p);
629
630 return (cpu == -1) ? task_cpu(p) : cpu;
631 }
632
633 /*
634 * Otherwise, just let it ride on the affined RQ and the
635 * post-schedule router will push the preempted task away
636 */
e7693a36
GH
637 return task_cpu(p);
638}
639#endif /* CONFIG_SMP */
640
45c01e82
GH
641static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
642 struct rt_rq *rt_rq);
643
bb44e5d1
IM
644/*
645 * Preempt the current task with a newly woken task if needed:
646 */
647static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
648{
45c01e82 649 if (p->prio < rq->curr->prio) {
bb44e5d1 650 resched_task(rq->curr);
45c01e82
GH
651 return;
652 }
653
654#ifdef CONFIG_SMP
655 /*
656 * If:
657 *
658 * - the newly woken task is of equal priority to the current task
659 * - the newly woken task is non-migratable while current is migratable
660 * - current will be preempted on the next reschedule
661 *
662 * we should check to see if current can readily move to a different
663 * cpu. If so, we will reschedule to allow the push logic to try
664 * to move current somewhere else, making room for our non-migratable
665 * task.
666 */
667 if ((p->prio == rq->curr->prio)
668 && p->rt.nr_cpus_allowed == 1
669 && rq->curr->rt.nr_cpus_allowed != 1
670 && pick_next_rt_entity(rq, &rq->rt) != &rq->curr->rt) {
671 cpumask_t mask;
672
673 if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
674 /*
675 * There appears to be other cpus that can accept
676 * current, so lets reschedule to try and push it away
677 */
678 resched_task(rq->curr);
679 }
680#endif
bb44e5d1
IM
681}
682
6f505b16
PZ
683static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
684 struct rt_rq *rt_rq)
bb44e5d1 685{
6f505b16
PZ
686 struct rt_prio_array *array = &rt_rq->active;
687 struct sched_rt_entity *next = NULL;
bb44e5d1
IM
688 struct list_head *queue;
689 int idx;
690
691 idx = sched_find_first_bit(array->bitmap);
6f505b16 692 BUG_ON(idx >= MAX_RT_PRIO);
bb44e5d1 693
45c01e82
GH
694 queue = array->xqueue + idx;
695 if (!list_empty(queue))
696 next = list_entry(queue->next, struct sched_rt_entity,
697 run_list);
698 else {
699 queue = array->squeue + idx;
700 next = list_entry(queue->next, struct sched_rt_entity,
701 run_list);
702 }
326587b8 703
6f505b16
PZ
704 return next;
705}
bb44e5d1 706
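/*
 * Walk the rt_rq hierarchy from the top, taking the highest-priority
 * entity at each level, until we reach a task.  Returns NULL when
 * nothing is runnable or the top-level rt_rq is throttled.
 */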
6f505b16
PZ
707static struct task_struct *pick_next_task_rt(struct rq *rq)
708{
709 struct sched_rt_entity *rt_se;
710 struct task_struct *p;
711 struct rt_rq *rt_rq;
bb44e5d1 712
6f505b16
PZ
713 rt_rq = &rq->rt;
714
715 if (unlikely(!rt_rq->rt_nr_running))
716 return NULL;
717
23b0fdfc 718 if (rt_rq_throttled(rt_rq))
6f505b16
PZ
719 return NULL;
720
721 do {
722 rt_se = pick_next_rt_entity(rq, rt_rq);
326587b8 723 BUG_ON(!rt_se);
6f505b16
PZ
724 rt_rq = group_rt_rq(rt_se);
725 } while (rt_rq);
726
727 p = rt_task_of(rt_se);
728 p->se.exec_start = rq->clock;
729 return p;
bb44e5d1
IM
730}
731
31ee529c 732static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
bb44e5d1 733{
f1e14ef6 734 update_curr_rt(rq);
bb44e5d1
IM
735 p->se.exec_start = 0;
736}
737
681f3e68 738#ifdef CONFIG_SMP
6f505b16 739
e8fa1362
SR
740/* Only try algorithms three times */
741#define RT_MAX_TRIES 3
742
743static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
744static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
745
f65eda4f
SR
746static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
747{
748 if (!task_running(rq, p) &&
73fe6aae 749 (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
6f505b16 750 (p->rt.nr_cpus_allowed > 1))
f65eda4f
SR
751 return 1;
752 return 0;
753}
754
e8fa1362 755/* Return the second highest RT task, NULL otherwise */
79064fbf 756static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
e8fa1362 757{
6f505b16
PZ
758 struct task_struct *next = NULL;
759 struct sched_rt_entity *rt_se;
760 struct rt_prio_array *array;
761 struct rt_rq *rt_rq;
e8fa1362
SR
762 int idx;
763
6f505b16
PZ
764 for_each_leaf_rt_rq(rt_rq, rq) {
765 array = &rt_rq->active;
766 idx = sched_find_first_bit(array->bitmap);
767 next_idx:
768 if (idx >= MAX_RT_PRIO)
769 continue;
770 if (next && next->prio < idx)
771 continue;
45c01e82 772 list_for_each_entry(rt_se, array->squeue + idx, run_list) {
6f505b16
PZ
773 struct task_struct *p = rt_task_of(rt_se);
774 if (pick_rt_task(rq, p, cpu)) {
775 next = p;
776 break;
777 }
778 }
779 if (!next) {
780 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
781 goto next_idx;
782 }
f65eda4f
SR
783 }
784
e8fa1362
SR
785 return next;
786}
787
788static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
789
6e1254d2
GH
790static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
791{
792 int first;
793
794 /* "this_cpu" is cheaper to preempt than a remote processor */
795 if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
796 return this_cpu;
797
798 first = first_cpu(*mask);
799 if (first != NR_CPUS)
800 return first;
801
802 return -1;
803}
804
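/*
 * Pick the best CPU to push 'task' to: ask cpupri for the set of CPUs
 * currently running something of lower priority, then prefer the
 * task's own CPU (cache-hot), then the closest CPU found by walking
 * the sched domains (favoring this_cpu, which is cheaper to preempt),
 * and finally anything left in the mask.
 */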
805static int find_lowest_rq(struct task_struct *task)
806{
807 struct sched_domain *sd;
808 cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
809 int this_cpu = smp_processor_id();
810 int cpu = task_cpu(task);
06f90dbd 811
6e0534f2
GH
812 if (task->rt.nr_cpus_allowed == 1)
813 return -1; /* No other targets possible */
6e1254d2 814
6e0534f2
GH
815 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
816 return -1; /* No targets found */
6e1254d2
GH
817
818 /*
819 * At this point we have built a mask of cpus representing the
820 * lowest priority tasks in the system. Now we want to elect
821 * the best one based on our affinity and topology.
822 *
823 * We prioritize the last cpu that the task executed on since
824 * it is most likely cache-hot in that location.
825 */
826 if (cpu_isset(cpu, *lowest_mask))
827 return cpu;
828
829 /*
830 * Otherwise, we consult the sched_domains span maps to figure
831 * out which cpu is logically closest to our hot cache data.
832 */
833 if (this_cpu == cpu)
834 this_cpu = -1; /* Skip this_cpu opt if the same */
835
836 for_each_domain(cpu, sd) {
837 if (sd->flags & SD_WAKE_AFFINE) {
838 cpumask_t domain_mask;
839 int best_cpu;
840
841 cpus_and(domain_mask, sd->span, *lowest_mask);
842
843 best_cpu = pick_optimal_cpu(this_cpu,
844 &domain_mask);
845 if (best_cpu != -1)
846 return best_cpu;
847 }
848 }
849
850 /*
851 * And finally, if there were no matches within the domains
852 * just give the caller *something* to work with from the compatible
853 * locations.
854 */
855 return pick_optimal_cpu(this_cpu, lowest_mask);
07b4032c
GH
856}
857
858/* Will lock the rq it finds */
4df64c0b 859static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c
GH
860{
861 struct rq *lowest_rq = NULL;
07b4032c 862 int tries;
4df64c0b 863 int cpu;
e8fa1362 864
07b4032c
GH
865 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
866 cpu = find_lowest_rq(task);
867
2de0b463 868 if ((cpu == -1) || (cpu == rq->cpu))
e8fa1362
SR
869 break;
870
07b4032c
GH
871 lowest_rq = cpu_rq(cpu);
872
e8fa1362 873 /* if the prio of this runqueue changed, try again */
07b4032c 874 if (double_lock_balance(rq, lowest_rq)) {
e8fa1362
SR
875 /*
876 * We had to unlock the run queue. In
877 * the mean time, task could have
878 * migrated already or had its affinity changed.
879 * Also make sure that it wasn't scheduled on its rq.
880 */
07b4032c 881 if (unlikely(task_rq(task) != rq ||
4df64c0b
IM
882 !cpu_isset(lowest_rq->cpu,
883 task->cpus_allowed) ||
07b4032c 884 task_running(rq, task) ||
e8fa1362 885 !task->se.on_rq)) {
4df64c0b 886
e8fa1362
SR
887 spin_unlock(&lowest_rq->lock);
888 lowest_rq = NULL;
889 break;
890 }
891 }
892
893 /* If this rq is still suitable use it. */
894 if (lowest_rq->rt.highest_prio > task->prio)
895 break;
896
897 /* try again */
898 spin_unlock(&lowest_rq->lock);
899 lowest_rq = NULL;
900 }
901
902 return lowest_rq;
903}
904
905/*
906 * If the current CPU has more than one RT task, see if the non
907 * running task can migrate over to a CPU that is running a task
908 * of lesser priority.
909 */
697f0a48 910static int push_rt_task(struct rq *rq)
e8fa1362
SR
911{
912 struct task_struct *next_task;
913 struct rq *lowest_rq;
914 int ret = 0;
915 int paranoid = RT_MAX_TRIES;
916
a22d7fc1
GH
917 if (!rq->rt.overloaded)
918 return 0;
919
697f0a48 920 next_task = pick_next_highest_task_rt(rq, -1);
e8fa1362
SR
921 if (!next_task)
922 return 0;
923
924 retry:
697f0a48 925 if (unlikely(next_task == rq->curr)) {
f65eda4f 926 WARN_ON(1);
e8fa1362 927 return 0;
f65eda4f 928 }
e8fa1362
SR
929
930 /*
931 * It's possible that the next_task slipped in at a
932 * higher priority than current. If that's the case
933 * just reschedule current.
934 */
697f0a48
GH
935 if (unlikely(next_task->prio < rq->curr->prio)) {
936 resched_task(rq->curr);
e8fa1362
SR
937 return 0;
938 }
939
697f0a48 940 /* We might release rq lock */
e8fa1362
SR
941 get_task_struct(next_task);
942
943 /* find_lock_lowest_rq locks the rq if found */
697f0a48 944 lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa1362
SR
945 if (!lowest_rq) {
946 struct task_struct *task;
947 /*
697f0a48 948 * find_lock_lowest_rq releases rq->lock
e8fa1362
SR
949 * so it is possible that next_task has changed.
950 * If it has, then try again.
951 */
697f0a48 952 task = pick_next_highest_task_rt(rq, -1);
e8fa1362
SR
953 if (unlikely(task != next_task) && task && paranoid--) {
954 put_task_struct(next_task);
955 next_task = task;
956 goto retry;
957 }
958 goto out;
959 }
960
697f0a48 961 deactivate_task(rq, next_task, 0);
e8fa1362
SR
962 set_task_cpu(next_task, lowest_rq->cpu);
963 activate_task(lowest_rq, next_task, 0);
964
965 resched_task(lowest_rq->curr);
966
967 spin_unlock(&lowest_rq->lock);
968
969 ret = 1;
970out:
971 put_task_struct(next_task);
972
973 return ret;
974}
975
976/*
977 * TODO: Currently we just use the second highest prio task on
978 * the queue, and stop when it can't migrate (or there's
979 * no more RT tasks). There may be a case where a lower
980 * priority RT task has a different affinity than the
981 * higher RT task. In this case the lower RT task could
982 * possibly be able to migrate whereas the higher priority
983 * RT task could not. We currently ignore this issue.
984 * Enhancements are welcome!
985 */
986static void push_rt_tasks(struct rq *rq)
987{
988 /* push_rt_task will return true if it moved an RT */
989 while (push_rt_task(rq))
990 ;
991}
992
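/*
 * The pull side of RT balancing: when this runqueue is about to drop
 * in priority (see pre_schedule_rt()), walk the CPUs flagged in
 * rd->rto_mask and steal the highest-priority queued-but-not-running
 * RT task that would preempt whatever we are about to run locally.
 */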
f65eda4f
SR
993static int pull_rt_task(struct rq *this_rq)
994{
80bf3171
IM
995 int this_cpu = this_rq->cpu, ret = 0, cpu;
996 struct task_struct *p, *next;
f65eda4f 997 struct rq *src_rq;
f65eda4f 998
637f5085 999 if (likely(!rt_overloaded(this_rq)))
f65eda4f
SR
1000 return 0;
1001
1002 next = pick_next_task_rt(this_rq);
1003
637f5085 1004 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
f65eda4f
SR
1005 if (this_cpu == cpu)
1006 continue;
1007
1008 src_rq = cpu_rq(cpu);
f65eda4f
SR
1009 /*
1010 * We can potentially drop this_rq's lock in
1011 * double_lock_balance, and another CPU could
1012 * steal our next task - hence we must cause
1013 * the caller to recalculate the next task
1014 * in that case:
1015 */
1016 if (double_lock_balance(this_rq, src_rq)) {
1017 struct task_struct *old_next = next;
80bf3171 1018
f65eda4f
SR
1019 next = pick_next_task_rt(this_rq);
1020 if (next != old_next)
1021 ret = 1;
1022 }
1023
1024 /*
1025 * Are there still pullable RT tasks?
1026 */
614ee1f6
MG
1027 if (src_rq->rt.rt_nr_running <= 1)
1028 goto skip;
f65eda4f 1029
f65eda4f
SR
1030 p = pick_next_highest_task_rt(src_rq, this_cpu);
1031
1032 /*
1033 * Do we have an RT task that preempts
1034 * the to-be-scheduled task?
1035 */
1036 if (p && (!next || (p->prio < next->prio))) {
1037 WARN_ON(p == src_rq->curr);
1038 WARN_ON(!p->se.on_rq);
1039
1040 /*
1041 * There's a chance that p is higher in priority
1042 * than what's currently running on its cpu.
1043 * This is just that p is waking up and hasn't
1044 * had a chance to schedule. We only pull
1045 * p if it is lower in priority than the
1046 * current task on the run queue or
1047 * this_rq's next task is lower in prio than
1048 * the current task on that rq.
1049 */
1050 if (p->prio < src_rq->curr->prio ||
1051 (next && next->prio < src_rq->curr->prio))
614ee1f6 1052 goto skip;
f65eda4f
SR
1053
1054 ret = 1;
1055
1056 deactivate_task(src_rq, p, 0);
1057 set_task_cpu(p, this_cpu);
1058 activate_task(this_rq, p, 0);
1059 /*
1060 * We continue with the search, just in
1061 * case there's an even higher prio task
1062 * in another runqueue. (low likelihood
1063 * but possible)
80bf3171 1064 *
f65eda4f
SR
1065 * Update next so that we won't pick a task
1066 * on another cpu with a priority lower (or equal)
1067 * than the one we just picked.
1068 */
1069 next = p;
1070
1071 }
614ee1f6 1072 skip:
f65eda4f
SR
1073 spin_unlock(&src_rq->lock);
1074 }
1075
1076 return ret;
1077}
1078
9a897c5a 1079static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
f65eda4f
SR
1080{
1081 /* Try to pull RT tasks here if we lower this rq's prio */
7f51f298 1082 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
f65eda4f
SR
1083 pull_rt_task(rq);
1084}
1085
9a897c5a 1086static void post_schedule_rt(struct rq *rq)
e8fa1362
SR
1087{
1088 /*
1089 * If we have more than one rt_task queued, then
1090 * see if we can push the other rt_tasks off to other CPUS.
1091 * Note we may release the rq lock, and since
1092 * the lock was owned by prev, we need to release it
1093 * first via finish_lock_switch and then reacquire it here.
1094 */
a22d7fc1 1095 if (unlikely(rq->rt.overloaded)) {
e8fa1362
SR
1096 spin_lock_irq(&rq->lock);
1097 push_rt_tasks(rq);
1098 spin_unlock_irq(&rq->lock);
1099 }
1100}
1101
8ae121ac
GH
1102/*
1103 * If we are not running and we are not going to reschedule soon, we should
1104 * try to push tasks away now
1105 */
9a897c5a 1106static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
4642dafd 1107{
9a897c5a 1108 if (!task_running(rq, p) &&
8ae121ac 1109 !test_tsk_need_resched(rq->curr) &&
a22d7fc1 1110 rq->rt.overloaded)
4642dafd
SR
1111 push_rt_tasks(rq);
1112}
1113
43010659 1114static unsigned long
bb44e5d1 1115load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
e1d1484f
PW
1116 unsigned long max_load_move,
1117 struct sched_domain *sd, enum cpu_idle_type idle,
1118 int *all_pinned, int *this_best_prio)
bb44e5d1 1119{
c7a1e46a
SR
1120 /* don't touch RT tasks */
1121 return 0;
e1d1484f
PW
1122}
1123
1124static int
1125move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1126 struct sched_domain *sd, enum cpu_idle_type idle)
1127{
c7a1e46a
SR
1128 /* don't touch RT tasks */
1129 return 0;
bb44e5d1 1130}
deeeccd4 1131
cd8ba7cd
MT
1132static void set_cpus_allowed_rt(struct task_struct *p,
1133 const cpumask_t *new_mask)
73fe6aae
GH
1134{
1135 int weight = cpus_weight(*new_mask);
1136
1137 BUG_ON(!rt_task(p));
1138
1139 /*
1140 * Update the migration status of the RQ if we have an RT task
1141 * which is running AND changing its weight value.
1142 */
6f505b16 1143 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
73fe6aae
GH
1144 struct rq *rq = task_rq(p);
1145
6f505b16 1146 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
73fe6aae 1147 rq->rt.rt_nr_migratory++;
6f505b16 1148 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
73fe6aae
GH
1149 BUG_ON(!rq->rt.rt_nr_migratory);
1150 rq->rt.rt_nr_migratory--;
1151 }
1152
1153 update_rt_migration(rq);
45c01e82
GH
1154
1155 if (unlikely(weight == 1 || p->rt.nr_cpus_allowed == 1))
1156 /*
1157 * If either the new or old weight is a "1", we need
1158 * to requeue to properly move between shared and
1159 * exclusive queues.
1160 */
1161 requeue_task_rt(rq, p);
73fe6aae
GH
1162 }
1163
1164 p->cpus_allowed = *new_mask;
6f505b16 1165 p->rt.nr_cpus_allowed = weight;
73fe6aae 1166}
deeeccd4 1167
bdd7c81b 1168/* Assumes rq->lock is held */
1f11eb6a 1169static void rq_online_rt(struct rq *rq)
bdd7c81b
IM
1170{
1171 if (rq->rt.overloaded)
1172 rt_set_overload(rq);
6e0534f2
GH
1173
1174 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
bdd7c81b
IM
1175}
1176
1177/* Assumes rq->lock is held */
1f11eb6a 1178static void rq_offline_rt(struct rq *rq)
bdd7c81b
IM
1179{
1180 if (rq->rt.overloaded)
1181 rt_clear_overload(rq);
6e0534f2
GH
1182
1183 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b 1184}
cb469845
SR
1185
1186/*
1187 * When switch from the rt queue, we bring ourselves to a position
1188 * that we might want to pull RT tasks from other runqueues.
1189 */
1190static void switched_from_rt(struct rq *rq, struct task_struct *p,
1191 int running)
1192{
1193 /*
1194 * If there are other RT tasks then we will reschedule
1195 * and the scheduling of the other RT tasks will handle
1196 * the balancing. But if we are the last RT task
1197 * we may need to handle the pulling of RT tasks
1198 * now.
1199 */
1200 if (!rq->rt.rt_nr_running)
1201 pull_rt_task(rq);
1202}
1203#endif /* CONFIG_SMP */
1204
1205/*
1206 * When switching a task to RT, we may overload the runqueue
1207 * with RT tasks. In this case we try to push them off to
1208 * other runqueues.
1209 */
1210static void switched_to_rt(struct rq *rq, struct task_struct *p,
1211 int running)
1212{
1213 int check_resched = 1;
1214
1215 /*
1216 * If we are already running, then there's nothing
1217 * that needs to be done. But if we are not running
1218 * we may need to preempt the current running task.
1219 * If that current running task is also an RT task
1220 * then see if we can move to another run queue.
1221 */
1222 if (!running) {
1223#ifdef CONFIG_SMP
1224 if (rq->rt.overloaded && push_rt_task(rq) &&
1225 /* Don't resched if we changed runqueues */
1226 rq != task_rq(p))
1227 check_resched = 0;
1228#endif /* CONFIG_SMP */
1229 if (check_resched && p->prio < rq->curr->prio)
1230 resched_task(rq->curr);
1231 }
1232}
1233
1234/*
1235 * Priority of the task has changed. This may cause
1236 * us to initiate a push or pull.
1237 */
1238static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1239 int oldprio, int running)
1240{
1241 if (running) {
1242#ifdef CONFIG_SMP
1243 /*
1244 * If our priority decreases while running, we
1245 * may need to pull tasks to this runqueue.
1246 */
1247 if (oldprio < p->prio)
1248 pull_rt_task(rq);
1249 /*
1250 * If there's a higher priority task waiting to run
6fa46fa5
SR
1251 * then reschedule. Note, the above pull_rt_task
1252 * can release the rq lock and p could migrate.
1253 * Only reschedule if p is still on the same runqueue.
cb469845 1254 */
6fa46fa5 1255 if (p->prio > rq->rt.highest_prio && rq->curr == p)
cb469845
SR
1256 resched_task(p);
1257#else
1258 /* For UP simply resched on drop of prio */
1259 if (oldprio < p->prio)
1260 resched_task(p);
e8fa1362 1261#endif /* CONFIG_SMP */
cb469845
SR
1262 } else {
1263 /*
1264 * This task is not running, but if it is
1265 * greater than the current running task
1266 * then reschedule.
1267 */
1268 if (p->prio < rq->curr->prio)
1269 resched_task(rq->curr);
1270 }
1271}
1272
78f2c7db
PZ
1273static void watchdog(struct rq *rq, struct task_struct *p)
1274{
1275 unsigned long soft, hard;
1276
1277 if (!p->signal)
1278 return;
1279
1280 soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1281 hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1282
1283 if (soft != RLIM_INFINITY) {
1284 unsigned long next;
1285
1286 p->rt.timeout++;
1287 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
5a52dd50 1288 if (p->rt.timeout > next)
78f2c7db
PZ
1289 p->it_sched_expires = p->se.sum_exec_runtime;
1290 }
1291}
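/*
 * Not part of this file: the budget the watchdog enforces is set from
 * userspace with setrlimit(RLIMIT_RTTIME), in microseconds of CPU time
 * consumed without blocking.  A minimal sketch with illustrative
 * limits (exceeding the soft limit draws SIGXCPU, the hard limit
 * SIGKILL):
 */
#define _GNU_SOURCE		/* RLIMIT_RTTIME is Linux-specific */
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
	struct rlimit rl = {
		.rlim_cur = 2000000,	/* soft: ~2s of unblocked RT CPU time */
		.rlim_max = 3000000,	/* hard limit */
	};

	if (setrlimit(RLIMIT_RTTIME, &rl) == -1) {
		perror("setrlimit");
		return 1;
	}
	return 0;
}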
bb44e5d1 1292
8f4d37ec 1293static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1 1294{
67e2be02
PZ
1295 update_curr_rt(rq);
1296
78f2c7db
PZ
1297 watchdog(rq, p);
1298
bb44e5d1
IM
1299 /*
1300 * RR tasks need a special form of timeslice management.
1301 * FIFO tasks have no timeslices.
1302 */
1303 if (p->policy != SCHED_RR)
1304 return;
1305
fa717060 1306 if (--p->rt.time_slice)
bb44e5d1
IM
1307 return;
1308
fa717060 1309 p->rt.time_slice = DEF_TIMESLICE;
bb44e5d1 1310
98fbc798
DA
1311 /*
1312 * Requeue to the end of queue if we are not the only element
1313 * on the queue:
1314 */
fa717060 1315 if (p->rt.run_list.prev != p->rt.run_list.next) {
98fbc798
DA
1316 requeue_task_rt(rq, p);
1317 set_tsk_need_resched(p);
1318 }
bb44e5d1
IM
1319}
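/*
 * DEF_TIMESLICE is defined in sched.c (roughly 100ms worth of ticks in
 * this kernel), so SCHED_RR tasks round-robin with their priority
 * peers about every 100ms, while SCHED_FIFO tasks run until they
 * block, yield or are preempted.
 */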
1320
83b699ed
SV
1321static void set_curr_task_rt(struct rq *rq)
1322{
1323 struct task_struct *p = rq->curr;
1324
1325 p->se.exec_start = rq->clock;
1326}
1327
2abdad0a 1328static const struct sched_class rt_sched_class = {
5522d5d5 1329 .next = &fair_sched_class,
bb44e5d1
IM
1330 .enqueue_task = enqueue_task_rt,
1331 .dequeue_task = dequeue_task_rt,
1332 .yield_task = yield_task_rt,
e7693a36
GH
1333#ifdef CONFIG_SMP
1334 .select_task_rq = select_task_rq_rt,
1335#endif /* CONFIG_SMP */
bb44e5d1
IM
1336
1337 .check_preempt_curr = check_preempt_curr_rt,
1338
1339 .pick_next_task = pick_next_task_rt,
1340 .put_prev_task = put_prev_task_rt,
1341
681f3e68 1342#ifdef CONFIG_SMP
bb44e5d1 1343 .load_balance = load_balance_rt,
e1d1484f 1344 .move_one_task = move_one_task_rt,
73fe6aae 1345 .set_cpus_allowed = set_cpus_allowed_rt,
1f11eb6a
GH
1346 .rq_online = rq_online_rt,
1347 .rq_offline = rq_offline_rt,
9a897c5a
SR
1348 .pre_schedule = pre_schedule_rt,
1349 .post_schedule = post_schedule_rt,
1350 .task_wake_up = task_wake_up_rt,
cb469845 1351 .switched_from = switched_from_rt,
681f3e68 1352#endif
bb44e5d1 1353
83b699ed 1354 .set_curr_task = set_curr_task_rt,
bb44e5d1 1355 .task_tick = task_tick_rt,
cb469845
SR
1356
1357 .prio_changed = prio_changed_rt,
1358 .switched_to = switched_to_rt,
bb44e5d1 1359};