/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
const_debug unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * After fork, child runs first. (default) If set to 0 then
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * The number of tasks that fit into a single latency period before
 * the period is stretched: latency/nr_latency is the effective
 * minimal preemption granularity (20ms/20 = 1 msec by default).
 */
const_debug unsigned int sysctl_sched_nr_latency = 20;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 10 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 10 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 10000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

#else   /* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta > 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return se->vruntime - cfs_rq->min_vruntime;
}

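/*
 * Note (illustrative): the signed casts above keep the comparisons
 * correct even across u64 wraparound. With min_vruntime == ULLONG_MAX - 5
 * and vruntime == 10, the difference wraps to +16 as an s64, so
 * vruntime is rightly treated as the later value; a plain u64 compare
 * would invert the ordering. The same trick makes entity_key() a small
 * signed offset around min_vruntime, which is what the rbtree sorts on.
 */
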
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = entity_key(cfs_rq, se);
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key < entity_key(cfs_rq, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node)
                cfs_rq->rb_leftmost = rb_next(&se->run_node);

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct sched_entity *se = NULL;
        struct rb_node *parent;

        while (*link) {
                parent = *link;
                se = rb_entry(parent, struct sched_entity, run_node);
                link = &parent->rb_right;
        }

        return se;
}

/**************************************************************
 * Scheduling class statistics methods:
 */


/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sysctl_sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period *= nr_running;
                do_div(period, nr_latency);
        }

        return period;
}

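/*
 * Worked example (illustrative, default values): with l = 20ms and
 * nl = 20, up to 20 runnable tasks share a fixed 20ms period; with 40
 * runnable tasks the period stretches to 20ms * 40/20 = 40ms, so an
 * equal-weight task's slice never drops below l/nl = 1ms.
 */
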
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*w/rw
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running);

        slice *= se->load.weight;
        do_div(slice, cfs_rq->load.weight);

        return slice;
}

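/*
 * Worked example (illustrative): with one nice-0 task (weight 1024)
 * and one nice-5 task (weight 335) runnable, rw = 1359; out of a 20ms
 * period the nice-0 task gets s = 20ms * 1024/1359 ~= 15ms and the
 * nice-5 task the remaining ~5ms.
 */
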
/*
 * We calculate the vruntime slice.
 *
 * vs = s/w = p/rw
 */
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
        u64 vslice = __sched_period(nr_running);

        do_div(vslice, rq_weight);

        return vslice;
}

static u64 sched_vslice(struct cfs_rq *cfs_rq)
{
        return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
}

static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return __sched_vslice(cfs_rq->load.weight + se->load.weight,
                              cfs_rq->nr_running + 1);
}

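/*
 * Note (illustrative): substituting s = p*w/rw gives vs = s/w = p/rw,
 * i.e. every entity advances the same amount of virtual time per
 * period regardless of weight - which is what makes vruntime a fair
 * clock. sched_vslice_add() evaluates this as if 'se' were already
 * enqueued, for placing entities that are about to be added.
 */
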
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;
        u64 vruntime;

        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = delta_exec;
        if (unlikely(curr->load.weight != NICE_0_LOAD)) {
                delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
                                                      &curr->load);
        }
        curr->vruntime += delta_exec_weighted;

        /*
         * Maintain cfs_rq->min_vruntime to be a monotonically increasing
         * value tracking the leftmost vruntime in the tree.
         */
        if (first_fair(cfs_rq)) {
                vruntime = min_vruntime(curr->vruntime,
                                __pick_next_entity(cfs_rq)->vruntime);
        } else
                vruntime = curr->vruntime;

        cfs_rq->min_vruntime =
                max_vruntime(cfs_rq->min_vruntime, vruntime);
}

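/*
 * Example (illustrative): delta_exec is scaled by NICE_0_LOAD/weight,
 * so 10ms of CPU time advances a nice-0 task's (weight 1024) vruntime
 * by 10ms, but a nice -5 task's (weight 3121) by only
 * 10ms * 1024/3121 ~= 3.3ms - heavier tasks age more slowly on the
 * fair clock and therefore receive proportionally more CPU.
 */
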
static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_of(cfs_rq)->clock;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));
        schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_add(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running++;
        se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_sub(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running--;
        se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->sleep_max))
                        se->sleep_max = delta;

                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->block_max))
                        se->block_max = delta;

                se->block_start = 0;
                se->sum_sleep_runtime += delta;

                /*
                 * Blocking time is in units of nanosecs, so shift by 20 to
                 * get a milliseconds-range estimation of the amount of
                 * time that the task spent sleeping:
                 */
                if (unlikely(prof_on == SLEEP_PROFILING)) {
                        struct task_struct *tsk = task_of(se);

                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
                                     delta >> 20);
                }
        }
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        s64 d = se->vruntime - cfs_rq->min_vruntime;

        if (d < 0)
                d = -d;

        if (d > 3*sysctl_sched_latency)
                schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
        u64 vruntime;

        vruntime = cfs_rq->min_vruntime;

        if (sched_feat(TREE_AVG)) {
                struct sched_entity *last = __pick_last_entity(cfs_rq);
                if (last) {
                        vruntime += last->vruntime;
                        vruntime >>= 1;
                }
        } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
                vruntime += sched_vslice(cfs_rq)/2;

        if (initial && sched_feat(START_DEBIT))
                vruntime += sched_vslice_add(cfs_rq, se);

        if (!initial) {
                if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
                                task_of(se)->policy != SCHED_BATCH)
                        vruntime -= sysctl_sched_latency;

                vruntime = max_t(s64, vruntime, se->vruntime);
        }

        se->vruntime = vruntime;
}

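/*
 * Note (illustrative): on fork (initial=1), START_DEBIT places the
 * child one vslice beyond min_vruntime so frequent forkers cannot
 * flood the left side of the tree; on wakeup (initial=0),
 * NEW_FAIR_SLEEPERS credits the sleeper up to sysctl_sched_latency of
 * virtual time, while the max_t() keeps a task's own vruntime as the
 * floor so short sleeps cannot be exploited to bank extra credit.
 */
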
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        if (wakeup) {
                place_entity(cfs_rq, se, 0);
                enqueue_sleeper(cfs_rq, se);
        }

        update_stats_enqueue(cfs_rq, se);
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
        account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
                se->peer_preempt = 0;
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }

        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long ideal_runtime, delta_exec;

        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime ||
                        (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
                resched_task(rq_of(cfs_rq)->curr);
        curr->peer_preempt = 0;
}

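/*
 * Example (illustrative): with two equal-weight runnable tasks,
 * sched_slice() is 10ms of the 20ms period; once 'current' has run
 * 10ms since it was scheduled in (delta_exec), the next tick kicks it
 * in favour of the leftmost entity in the tree.
 */
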
static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /* 'current' is not kept within the tree. */
        if (se->on_rq) {
                /*
                 * Any task has to be enqueued before it gets to execute on
                 * a CPU. So account for the time it spent waiting on the
                 * runqueue.
                 */
                update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
        }

        update_stats_curr_start(cfs_rq, se);
        cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. don't track it
         * when there are only lesser-weight tasks around):
         */
        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
                se->slice_max = max(se->slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
#endif
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = NULL;

        if (first_fair(cfs_rq)) {
                se = __pick_next_entity(cfs_rq);
                set_next_entity(cfs_rq, se);
        }

        return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        check_spread(cfs_rq, prev);
        if (prev->on_rq) {
                update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
        }
        cfs_rq->curr = NULL;
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
                check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

679#else /* CONFIG_FAIR_GROUP_SCHED */
680
681#define for_each_sched_entity(se) \
682 for (; se; se = NULL)
683
684static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
685{
686 return &task_rq(p)->cfs;
687}
688
689static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
690{
691 struct task_struct *p = task_of(se);
692 struct rq *rq = task_rq(p);
693
694 return &rq->cfs;
695}
696
697/* runqueue "owned" by this group */
698static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
699{
700 return NULL;
701}
702
703static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
704{
705 return &cpu_rq(this_cpu)->cfs;
706}
707
708#define for_each_leaf_cfs_rq(rq, cfs_rq) \
709 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
710
fad095a7
SV
711static inline int
712is_same_group(struct sched_entity *se, struct sched_entity *pse)
bf0f6f24
IM
713{
714 return 1;
715}
716
fad095a7
SV
717static inline struct sched_entity *parent_entity(struct sched_entity *se)
718{
719 return NULL;
720}
721
bf0f6f24
IM
722#endif /* CONFIG_FAIR_GROUP_SCHED */
723
/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
                wakeup = 1;
        }
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight)
                        break;
                sleep = 1;
        }
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
        struct sched_entity *rightmost, *se = &rq->curr->se;

        /*
         * Are we the only task in the tree?
         */
        if (unlikely(cfs_rq->nr_running == 1))
                return;

        if (likely(!sysctl_sched_compat_yield)) {
                __update_rq_clock(rq);
                /*
                 * Update run-time statistics of the 'current'.
                 */
                update_curr(cfs_rq);

                return;
        }
        /*
         * Find the rightmost entry in the rbtree:
         */
        rightmost = __pick_last_entity(cfs_rq);
        /*
         * Already in the rightmost position?
         */
        if (unlikely(rightmost->vruntime < se->vruntime))
                return;

        /*
         * Minimally necessary key value to be last in the tree:
         * Upon rescheduling, sched_class::put_prev_task() will place
         * 'current' within the tree based on its new key value.
         */
        se->vruntime = rightmost->vruntime + 1;
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
        struct task_struct *curr = rq->curr;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        struct sched_entity *se = &curr->se, *pse = &p->se;
        s64 delta, gran;

        if (unlikely(rt_prio(p->prio))) {
                update_rq_clock(rq);
                update_curr(cfs_rq);
                resched_task(curr);
                return;
        }
        /*
         * Batch tasks do not preempt (their preemption is driven by
         * the tick):
         */
        if (unlikely(p->policy == SCHED_BATCH))
                return;

        if (sched_feat(WAKEUP_PREEMPT)) {
                while (!is_same_group(se, pse)) {
                        se = parent_entity(se);
                        pse = parent_entity(pse);
                }

                delta = se->vruntime - pse->vruntime;
                gran = sysctl_sched_wakeup_granularity;
                if (unlikely(se->load.weight != NICE_0_LOAD))
                        gran = calc_delta_fair(gran, &se->load);

                if (delta > gran) {
                        int now = !sched_feat(PREEMPT_RESTRICT);

                        if (now || p->prio < curr->prio || !se->peer_preempt++)
                                resched_task(curr);
                }
        }
}

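/*
 * Note (illustrative): the running task is preempted only once the
 * woken task's vruntime trails it by more than the wakeup granularity
 * (10ms at nice 0, rescaled for other weights), so tasks that wake
 * each other frequently do not ping-pong on every single wakeup.
 */
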
static struct task_struct *pick_next_task_fair(struct rq *rq)
{
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;

        if (unlikely(!cfs_rq->nr_running))
                return NULL;

        do {
                se = pick_next_entity(cfs_rq);
                cfs_rq = group_cfs_rq(se);
        } while (cfs_rq);

        return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
        struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                put_prev_entity(cfs_rq, se);
        }
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
        struct task_struct *p;

        if (!curr)
                return NULL;

        p = rb_entry(curr, struct task_struct, se.run_node);
        cfs_rq->rb_load_balance_curr = rb_next(curr);

        return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr;
        struct task_struct *p;

        if (!cfs_rq->nr_running)
                return MAX_PRIO;

        curr = cfs_rq->curr;
        if (!curr)
                curr = __pick_next_entity(cfs_rq);

        p = task_of(curr);

        return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_nr_move, unsigned long max_load_move,
                  struct sched_domain *sd, enum cpu_idle_type idle,
                  int *all_pinned, int *this_best_prio)
{
        struct cfs_rq *busy_cfs_rq;
        unsigned long load_moved, total_nr_moved = 0, nr_moved;
        long rem_load_move = max_load_move;
        struct rq_iterator cfs_rq_iterator;

        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;

        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
                struct cfs_rq *this_cfs_rq;
                long imbalance;
                unsigned long maxload;

                this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

                imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
                /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
                if (imbalance <= 0)
                        continue;

                /* Don't pull more than imbalance/2 */
                imbalance /= 2;
                maxload = min(rem_load_move, imbalance);

                *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
                /* pass busy_cfs_rq argument into
                 * load_balance_[start|next]_fair iterators
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
                nr_moved = balance_tasks(this_rq, this_cpu, busiest,
                                max_nr_move, maxload, sd, idle, all_pinned,
                                &load_moved, this_best_prio, &cfs_rq_iterator);

                total_nr_moved += nr_moved;
                max_nr_move -= nr_moved;
                rem_load_move -= load_moved;

                if (max_nr_move <= 0 || rem_load_move <= 0)
                        break;
        }

        return max_load_move - rem_load_move;
}

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &curr->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                entity_tick(cfs_rq, se);
        }
}

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
        struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
        int this_cpu = smp_processor_id();

        sched_info_queued(p);

        update_curr(cfs_rq);
        place_entity(cfs_rq, se, 1);

        if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
                        curr->vruntime < se->vruntime) {
                /*
                 * Upon rescheduling, sched_class::put_prev_task() will place
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
        }

        se->peer_preempt = 0;
        enqueue_task_fair(rq, p, 0);
        resched_task(rq->curr);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
        struct sched_entity *se = &rq->curr->se;

        for_each_sched_entity(se)
                set_next_entity(cfs_rq_of(se), se);
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
        .next                   = &idle_sched_class,
        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,

        .check_preempt_curr     = check_preempt_wakeup,

        .pick_next_task         = pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,

        .load_balance           = load_balance_fair,

        .set_curr_task          = set_curr_task_fair,
        .task_tick              = task_tick_fair,
        .task_new               = task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
        struct cfs_rq *cfs_rq;

#ifdef CONFIG_FAIR_GROUP_SCHED
        print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
#endif
        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
}
#endif