/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 4000000ULL;

/*
 * sched_nr_latency is kept at
 * sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 5;

/*
 * After fork, child runs first (default). If set to 0 then
 * the parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 10000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
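
/*
 * Note on the (s64) casts above: they make the comparisons safe across
 * u64 wraparound. If vruntime has wrapped past 0 while min_vruntime
 * still sits just below ULLONG_MAX, the signed delta is a small
 * positive number, so the wrapped value is still recognized as later.
 */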

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif
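
/*
 * Worked example with the defaults above: sysctl_sched_latency = 20ms
 * and sysctl_sched_min_granularity = 4ms give
 * sched_nr_latency = DIV_ROUND_UP(20000000, 4000000) = 5.
 */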

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (> sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
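
/*
 * Worked example with the defaults: 4 runnable tasks (<= nr_latency)
 * share the full 20ms latency period; 10 runnable tasks stretch it to
 * 10 * 4ms = 40ms, so no slice shrinks below the minimum granularity.
 */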

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*w/rw
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running);

	slice *= se->load.weight;
	do_div(slice, cfs_rq->load.weight);

	return slice;
}
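
/*
 * Worked example: a nice 0 task (weight 1024) and a nice -5 task
 * (weight 3121, per the prio_to_weight[] table in sched.c) sharing a
 * 20ms period get roughly 20ms * 1024/4145 ~= 4.9ms and
 * 20ms * 3121/4145 ~= 15.1ms of wall time respectively.
 */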

/*
 * We calculate the vruntime slice.
 *
 * vs = s/w = p/rw
 */
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
	u64 vslice = __sched_period(nr_running);

	vslice *= NICE_0_LOAD;
	do_div(vslice, rq_weight);

	return vslice;
}

static u64 sched_vslice(struct cfs_rq *cfs_rq)
{
	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
}

static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
				cfs_rq->nr_running + 1);
}
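
/*
 * Continuing the example above: with rq_weight = 4145 both entities get
 * the same vruntime slice, 20ms * 1024/4145 ~= 4.9ms. Since vruntime
 * advances more slowly for heavier tasks, equal vruntime slices
 * translate into weight-proportional wall-time slices.
 */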

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;
	u64 vruntime;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;

	/*
	 * maintain cfs_rq->min_vruntime to be a monotonic increasing
	 * value tracking the leftmost vruntime in the tree.
	 */
	if (first_fair(cfs_rq)) {
		vruntime = min_vruntime(curr->vruntime,
				__pick_next_entity(cfs_rq)->vruntime);
	} else
		vruntime = curr->vruntime;

	cfs_rq->min_vruntime =
		max_vruntime(cfs_rq->min_vruntime, vruntime);
}
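
/*
 * Example of the weighting above: a nice 0 task that runs 1ms advances
 * its vruntime by 1ms, while a task of twice NICE_0_LOAD advances only
 * ~0.5ms (calc_delta_fair() scales delta_exec by NICE_0_LOAD/weight).
 * That is why picking the leftmost (smallest) vruntime gives heavier
 * tasks proportionally more CPU time.
 */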

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		cpuacct_charge(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		account_scheduler_latency(tsk, delta >> 10, 1);
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
		struct task_struct *tsk = task_of(se);

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {
			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
		account_scheduler_latency(tsk, delta >> 10, 0);
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime;

	vruntime = cfs_rq->min_vruntime;

	if (sched_feat(TREE_AVG)) {
		struct sched_entity *last = __pick_last_entity(cfs_rq);
		if (last) {
			vruntime += last->vruntime;
			vruntime >>= 1;
		}
	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
		vruntime += sched_vslice(cfs_rq)/2;

	/*
	 * The 'current' period is already promised to the current tasks;
	 * however, the extra weight of the new task will slow them down a
	 * little, so place the new task in the slot that stays open at
	 * the end of the period.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice_add(cfs_rq, se);

	if (!initial) {
		/* sleeps up to a single latency don't count. */
		if (sched_feat(NEW_FAIR_SLEEPERS))
			vruntime -= sysctl_sched_latency;

		/* ensure we never gain time by being placed backwards. */
		vruntime = max_vruntime(se->vruntime, vruntime);
	}

	se->vruntime = vruntime;
}
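
/*
 * Example: with START_DEBIT a newly forked task starts one (enlarged)
 * vruntime slice past min_vruntime, so it runs at the end of the
 * current period rather than preempting everyone immediately. A waking
 * sleeper may be credited up to sysctl_sched_latency before
 * min_vruntime, but max_vruntime() keeps its vruntime from dropping
 * below the value it already had - a task can never gain by sleeping.
 */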

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task once it has used up the wall-time slice it
 * is entitled to within the current period:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
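
/*
 * Example: with the ~4.9ms slice computed earlier, the nice 0 task is
 * marked for rescheduling once it has run 4.9ms past
 * prev_sum_exec_runtime, the point at which it was last picked.
 */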

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = NULL;

	if (first_fair(cfs_rq)) {
		se = __pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
	}

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued)
		return resched_task(rq_of(cfs_rq)->curr);
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

#define GROUP_IMBALANCE_PCT	20

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	int requeue = rq->curr == p;
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (!requeue)
			delta = max(10000LL, delta);

		hrtick_start(rq, delta, requeue);
	}
}
#else
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se,
			    *topse = NULL;	/* Highest schedulable entity */
	int incload = 1;

	for_each_sched_entity(se) {
		topse = se;
		if (se->on_rq) {
			incload = 0;
			break;
		}
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}
	/* Increment cpu load if we just enqueued the first task of a group on
	 * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
	 * at the highest grouping level.
	 */
	if (incload)
		inc_cpu_load(rq, topse->load.weight);

	hrtick_start_fair(rq, rq->curr);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se,
			    *topse = NULL;	/* Highest schedulable entity */
	int decload = 1;

	for_each_sched_entity(se) {
		topse = se;
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			if (parent_entity(se))
				decload = 0;
			break;
		}
		sleep = 1;
	}
	/* Decrement cpu load if we just dequeued the last task of a group on
	 * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
	 * at the highest grouping level.
	 */
	if (decload)
		dec_cpu_load(rq, topse->load.weight);

	hrtick_start_fair(rq, rq->curr);
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		__update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(rightmost->vruntime < se->vruntime))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}

/*
 * wake_idle() will wake a task on an idle cpu if task->cpu is
 * not idle and an idle cpu is available. The span of cpus to
 * search starts with cpus closest then further out as needed,
 * so we always favor a closer, idle cpu.
 *
 * Returns the CPU we should wake onto.
 */
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
static int wake_idle(int cpu, struct task_struct *p)
{
	cpumask_t tmp;
	struct sched_domain *sd;
	int i;

	/*
	 * If it is idle, then it is the best cpu to run this task.
	 *
	 * This cpu is also the best, if it has more than one task already.
	 * Siblings must also be busy (in most cases) as they didn't already
	 * pick up the extra load from this cpu, hence we need not check
	 * sibling runqueue info. This avoids the checks and cache-miss
	 * penalties associated with that.
	 */
	if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
		return cpu;

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_IDLE) {
			cpus_and(tmp, sd->span, p->cpus_allowed);
			for_each_cpu_mask(i, tmp) {
				if (idle_cpu(i)) {
					if (i != task_cpu(p)) {
						schedstat_inc(p,
							se.nr_wakeups_idle);
					}
					return i;
				}
			}
		} else {
			break;
		}
	}
	return cpu;
}
#else
static inline int wake_idle(int cpu, struct task_struct *p)
{
	return cpu;
}
#endif

#ifdef CONFIG_SMP
static int select_task_rq_fair(struct task_struct *p, int sync)
{
	int cpu, this_cpu;
	struct rq *rq;
	struct sched_domain *sd, *this_sd = NULL;
	int new_cpu;

	cpu      = task_cpu(p);
	rq       = task_rq(p);
	this_cpu = smp_processor_id();
	new_cpu  = cpu;

	if (cpu == this_cpu)
		goto out_set_cpu;

	for_each_domain(this_cpu, sd) {
		if (cpu_isset(cpu, sd->span)) {
			this_sd = sd;
			break;
		}
	}

	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
		goto out_set_cpu;

	/*
	 * Check for affine wakeup and passive balancing possibilities.
	 */
	if (this_sd) {
		int idx = this_sd->wake_idx;
		unsigned int imbalance;
		unsigned long load, this_load;

		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

		load = source_load(cpu, idx);
		this_load = target_load(this_cpu, idx);

		new_cpu = this_cpu; /* Wake to this CPU if we can */

		if (this_sd->flags & SD_WAKE_AFFINE) {
			unsigned long tl = this_load;
			unsigned long tl_per_task;

			/*
			 * Attract cache-cold tasks on sync wakeups:
			 */
			if (sync && !task_hot(p, rq->clock, this_sd))
				goto out_set_cpu;

			schedstat_inc(p, se.nr_wakeups_affine_attempts);
			tl_per_task = cpu_avg_load_per_task(this_cpu);

			/*
			 * If sync wakeup then subtract the (maximum possible)
			 * effect of the currently running task from the load
			 * of the current CPU:
			 */
			if (sync)
				tl -= current->se.load.weight;

			if ((tl <= load &&
				tl + target_load(cpu, idx) <= tl_per_task) ||
			       100*(tl + p->se.load.weight) <= imbalance*load) {
				/*
				 * This domain has SD_WAKE_AFFINE and
				 * p is cache cold in this domain, and
				 * there is no bad imbalance.
				 */
				schedstat_inc(this_sd, ttwu_move_affine);
				schedstat_inc(p, se.nr_wakeups_affine);
				goto out_set_cpu;
			}
		}

		/*
		 * Start passive balancing when half the imbalance_pct
		 * limit is reached.
		 */
		if (this_sd->flags & SD_WAKE_BALANCE) {
			if (imbalance*this_load <= 100*load) {
				schedstat_inc(this_sd, ttwu_move_balance);
				schedstat_inc(p, se.nr_wakeups_passive);
				goto out_set_cpu;
			}
		}
	}

	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
out_set_cpu:
	return wake_idle(new_cpu, p);
}
#endif /* CONFIG_SMP */
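
/*
 * Example of the affine-wakeup math: an imbalance_pct of 125, for
 * instance, yields an effective factor of 100 + (125 - 100)/2 = 112,
 * so waking a weight 1024 task onto this_cpu is allowed when
 * 100 * (this_load + 1024) <= 112 * source_load, i.e. when the pull
 * leaves this CPU no more than ~12% busier than the waker's old CPU.
 */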


/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *se = &curr->se, *pse = &p->se;
	unsigned long gran;

	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}
	/*
	 * Batch tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy == SCHED_BATCH))
		return;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	while (!is_same_group(se, pse)) {
		se = parent_entity(se);
		pse = parent_entity(pse);
	}

	gran = sysctl_sched_wakeup_granularity;
	/*
	 * More easily preempt negative-nice tasks, while not making it
	 * harder for positive-nice tasks.
	 */
	if (unlikely(se->load.weight > NICE_0_LOAD))
		gran = calc_delta_fair(gran, &se->load);

	if (pse->vruntime + gran < se->vruntime)
		resched_task(curr);
}
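
/*
 * Example: with the default 10ms wakeup granularity a waking task only
 * preempts when its vruntime lags current's by more than 10ms (in
 * nice 0 units). When current runs at negative nice the granularity is
 * scaled down by calc_delta_fair(), making such tasks easier to
 * preempt.
 */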

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;
	unsigned long load_moved;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu];
		unsigned long maxload, task_load, group_weight;
		unsigned long thisload, per_task_load;
		struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu];

		task_load = busy_cfs_rq->load.weight;
		group_weight = se->load.weight;

		/*
		 * 'group_weight' is contributed by tasks of total weight
		 * 'task_load'. To move 'rem_load_move' worth of weight only,
		 * we need to move a maximum task load of:
		 *
		 * maxload = (remload / group_weight) * task_load;
		 */
		maxload = (rem_load_move * task_load) / group_weight;

		if (!maxload || !task_load)
			continue;

		per_task_load = task_load / busy_cfs_rq->nr_running;
		/*
		 * balance_tasks will try to forcibly move at least one task
		 * if possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that
		 * if maxload is less than GROUP_IMBALANCE_PCT% of the
		 * per_task_load.
		 */
		if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load)
			continue;

		/* Disable priority-based load balance */
		*this_best_prio = 0;
		thisload = this_cfs_rq->load.weight;
#else
# define maxload rem_load_move
#endif
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		load_moved = balance_tasks(this_rq, this_cpu, busiest,
					   maxload, sd, idle, all_pinned,
					   this_best_prio,
					   &cfs_rq_iterator);

#ifdef CONFIG_FAIR_GROUP_SCHED
		/*
		 * load_moved holds the task load that was moved. The
		 * effective (group) weight moved would be:
		 * load_moved_eff = load_moved/task_load * group_weight;
		 */
		load_moved = (group_weight * load_moved) / task_load;

		/* Adjust shares on both cpus to reflect load_moved */
		group_weight -= load_moved;
		set_se_shares(se, group_weight);

		se = busy_cfs_rq->tg->se[this_cpu];
		if (!thisload)
			group_weight = load_moved;
		else
			group_weight = se->load.weight + load_moved;
		set_se_shares(se, group_weight);
#endif

		rem_load_move -= load_moved;

		if (rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}
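
/*
 * Example of the group-weight conversion: if a group entity of weight
 * 2048 is backed by tasks totalling 4096 of task load, moving 1024 of
 * task load transfers 1024/4096 * 2048 = 512 of effective group
 * weight, and set_se_shares() shifts that amount from the source cpu's
 * entity to the destination cpu's.
 */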

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
#endif

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();

	sched_info_queued(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, se, 1);

	/* 'curr' will be NULL if the child belongs to a different group */
	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
			curr && curr->vruntime < se->vruntime) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
	}

	enqueue_task_fair(rq, p, 0);
	resched_task(rq->curr);
}
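
/*
 * Example: place_entity(..., initial=1) normally pushes the child past
 * the parent's vruntime (START_DEBIT). With child_runs_first set, the
 * swap above hands the earlier position to the child so it is picked
 * first, while the pair's combined vruntime is unchanged.
 */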

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p);
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
#endif
	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif