/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 5ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 5000000ULL;
unsigned int normalized_sysctl_sched_latency = 5000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 1000000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL;

/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity.
 */
static unsigned int sched_nr_latency = 5;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/*
 * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq, i.e. who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of common
	 * parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
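
/*
 * Illustrative note (not in the original source): the (s64) cast above
 * makes these comparisons safe across u64 wrap-around. E.g. with
 * vruntime = 10 just past the wrap and min_vruntime = ULLONG_MAX - 10,
 * the subtraction yields 21, which is > 0 as an s64, so max_vruntime()
 * correctly treats the wrapped value as the larger one even though
 * 10 < ULLONG_MAX - 10 numerically.
 */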

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
	WRT_SYSCTL(sched_shares_ratelimit);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
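
/*
 * Worked example (illustrative, not part of the original source): with
 * NICE_0_LOAD == 1024, calc_delta_mine() above effectively computes
 * delta * 1024 / se->load.weight. So for delta = 4ms:
 *
 *   weight 1024 (nice 0)  -> vruntime advances by 4ms
 *   weight 2048 (heavier) -> vruntime advances by 2ms
 *   weight  512 (lighter) -> vruntime advances by 8ms
 *
 * i.e. heavier tasks accumulate vruntime more slowly and therefore
 * receive proportionally more CPU time.
 */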

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
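
/*
 * Worked example (illustrative, assuming the unscaled defaults above:
 * latency = 5ms, min_granularity = 1ms, so sched_nr_latency = 5):
 *
 *   nr_running = 3 -> period = 5ms
 *   nr_running = 5 -> period = 5ms
 *   nr_running = 8 -> period = 8 * 1ms = 8ms
 *
 * Beyond sched_nr_latency runnable tasks the period stretches linearly,
 * keeping every slice at least sysctl_sched_min_granularity long.
 */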

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
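
/*
 * Worked example (illustrative): two runnable tasks on one cfs_rq, a
 * nice-0 task A (weight 1024) and a heavier task B (weight 2048), with
 * a 5ms period, so rw = 3072:
 *
 *   slice(A) = 5ms * 1024/3072 ~= 1.67ms
 *   slice(B) = 5ms * 2048/3072 ~= 3.33ms
 *
 * The slices sum to the period, split in proportion to the weights.
 */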

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);
	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_count, se->wait_count + 1);
	schedstat_set(se->wait_sum, se->wait_sum +
			rq_of(cfs_rq)->clock - se->wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->wait_start);
	}
#endif
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->iowait_sum += delta;
				se->iowait_count++;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial && sched_feat(FAIR_SLEEPERS)) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Convert the sleeper threshold into virtual time.
		 * SCHED_IDLE is a special sub-class. We care about
		 * fairness only relative to other SCHED_IDLE tasks,
		 * all of which have the same weight.
		 */
		if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
				task_of(se)->policy != SCHED_IDLE))
			thresh = calc_delta_fair(thresh, se);

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}
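
/*
 * Worked example (illustrative, assuming latency = 5ms with the
 * FAIR_SLEEPERS and GENTLE_FAIR_SLEEPERS features enabled): a nice-0
 * task waking from a long sleep gets thresh = 5ms / 2 = 2.5ms and is
 * placed at min_vruntime - 2.5ms, so it preempts soon but cannot hoard
 * arbitrary sleep credit. A freshly forked task under START_DEBIT is
 * instead placed one vslice *after* min_vruntime and must wait its turn.
 */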

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}

static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (!se || cfs_rq->last == se)
		cfs_rq->last = NULL;

	if (!se || cfs_rq->next == se)
		cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	for_each_sched_entity(se)
		__clear_buddies(cfs_rq_of(se), se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
	update_min_vruntime(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (delta_exec < sysctl_sched_min_granularity)
		return;

	if (cfs_rq->nr_running > 1) {
		struct sched_entity *se = __pick_next_entity(cfs_rq);
		s64 delta = curr->vruntime - se->vruntime;

		if (delta > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
}
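
/*
 * Worked example (illustrative): with two equally weighted tasks and a
 * 5ms period, ideal_runtime = 2.5ms. If curr has run 3ms since it was
 * last picked, delta_exec exceeds ideal_runtime and curr is rescheduled
 * at once; after only 2ms it keeps running, unless the leftmost entity
 * already trails curr by more than 2.5ms of vruntime.
 */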

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);
	struct sched_entity *left = se;

	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	/*
	 * Prefer last buddy, try to return the CPU to a preempted task.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	clear_buddies(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
	/*
	 * queued ticks are scheduled to match the slice, so don't bother
	 * validating it and just reschedule.
	 */
	if (queued) {
		resched_task(rq_of(cfs_rq)->curr);
		return;
	}
	/*
	 * don't let the period tick interfere with the hrtick preemption
	 */
	if (!sched_feat(DOUBLE_TICK) &&
			hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
		return;
#endif

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_task(p);
			return;
		}

		/*
		 * Don't schedule slices shorter than 10000ns, that just
		 * doesn't make sense. Rely on vruntime for fairness.
		 */
		if (rq->curr != p)
			delta = max_t(s64, 10000LL, delta);

		hrtick_start(rq, delta);
	}
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}

	hrtick_update(rq);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		sleep = 1;
	}

	hrtick_update(rq);
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *rightmost, *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(!rightmost || entity_before(rightmost, se)))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}

#ifdef CONFIG_SMP

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * The problem is that perfectly aligning the shares is rather expensive, hence
 * we try to avoid doing that too often - see update_shares(), which ratelimits
 * this change.
 *
 * We compensate this by not only taking the current delta into account, but
 * also considering the delta between when the shares were last adjusted and
 * now.
 *
 * We still saw a performance dip, some tracing taught us that between
 * cgroup:/ and cgroup:/foo balancing the number of affine wakeups increased
 * significantly. Therefore try to bias the error in direction of failing
 * the affine wakeup.
 */
static long effective_load(struct task_group *tg, int cpu,
		long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	/*
	 * By not taking the decrease of shares on the other cpu into
	 * account our error leans towards reducing the affine wakeups.
	 */
	if (!wl && sched_feat(ASYM_EFF_LOAD))
		return wl;

	for_each_sched_entity(se) {
		long S, rw, s, a, b;
		long more_w;

		/*
		 * Instead of using this increment, also add the difference
		 * between when the shares were last updated and now.
		 */
		more_w = se->my_q->load.weight - se->my_q->rq_weight;
		wl += more_w;
		wg += more_w;

		S = se->my_q->tg->shares;
		s = se->my_q->shares;
		rw = se->my_q->rq_weight;

		a = S*(rw + wl);
		b = S*rw + s*wg;

		wl = s*(a-b);

		if (likely(b))
			wl /= b;

		/*
		 * Assume the group is already running and will
		 * thus already be accounted for in the weight.
		 *
		 * That is, moving shares between CPUs, does not
		 * alter the group weight.
		 */
		wg = 0;
	}

	return wl;
}
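
/*
 * Worked example (illustrative numbers only): one level below the root,
 * with group shares S = 2048, current per-cpu share s = 512, queue
 * weight rw = 2048, and a waking task of weight wl = wg = 1024:
 *
 *   a  = S*(rw+wl) = 2048*3072            = 6291456
 *   b  = S*rw+s*wg = 2048*2048 + 512*1024 = 4718592
 *   wl = s*(a-b)/b                       ~= 170
 *
 * i.e. the root sees only ~170 of the task's 1024 weight, because the
 * group's shares are redistributed rather than grown.
 */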

#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif

static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	struct task_struct *curr = current;
	unsigned long this_load, load;
	int idx, this_cpu, prev_cpu;
	unsigned long tl_per_task;
	unsigned int imbalance;
	struct task_group *tg;
	unsigned long weight;
	int balanced;

	idx = sd->wake_idx;
	this_cpu = smp_processor_id();
	prev_cpu = task_cpu(p);
	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	if (sync) {
		if (sched_feat(SYNC_LESS) &&
		    (curr->se.avg_overlap > sysctl_sched_migration_cost ||
		     p->se.avg_overlap > sysctl_sched_migration_cost))
			sync = 0;
	} else {
		if (sched_feat(SYNC_MORE) &&
		    (curr->se.avg_overlap < sysctl_sched_migration_cost &&
		     p->se.avg_overlap < sysctl_sched_migration_cost))
			sync = 1;
	}

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	imbalance = 100 + (sd->imbalance_pct - 100) / 2;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
	 * due to the sync cause above having dropped this_load to 0, we'll
	 * always have an imbalance, but there's really nothing you can do
	 * about that, so that's good too.
	 *
	 * Otherwise check if either cpu is near enough in load to allow this
	 * task to be woken on this_cpu.
	 */
	balanced = !this_load ||
		100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
		imbalance*(load + effective_load(tg, prev_cpu, 0, weight));

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(sd, ttwu_move_affine);
		schedstat_inc(p, se.nr_wakeups_affine);

		return 1;
	}
	return 0;
}
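
/*
 * Illustrative numbers (assuming imbalance_pct = 125, giving an
 * effective imbalance factor of 112, and no group scheduling, so
 * effective_load() simply returns wl): with this_load = 1000, a waking
 * task of weight 1024 and prev_cpu load = 2200:
 *
 *   100 * (1000 + 1024) = 202400 <= 112 * 2200 = 246400
 *
 * so the wakeup counts as balanced and the task may be pulled to the
 * waking cpu.
 */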

/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
		  int this_cpu, int load_idx)
{
	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
					&p->cpus_allowed))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

		if (local_group) {
			this_load = avg_load;
			this = group;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}

/*
 * Try and locate an idle CPU in the sched_domain.
 */
static int
select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
{
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int i;

	/*
	 * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
	 * test in select_task_rq_fair) and the prev_cpu is idle then that's
	 * always a better target than the current cpu.
	 */
	if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
		return prev_cpu;

	/*
	 * Otherwise, iterate the domain and find an eligible idle cpu.
	 */
	for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
		if (!cpu_rq(i)->cfs.nr_running) {
			target = i;
			break;
		}
	}

	return target;
}

/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, i.e. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
	int want_affine = 0;
	int want_sd = 1;
	int sync = wake_flags & WF_SYNC;

	if (sd_flag & SD_BALANCE_WAKE) {
		if (sched_feat(AFFINE_WAKEUPS) &&
		    cpumask_test_cpu(cpu, &p->cpus_allowed))
			want_affine = 1;
		new_cpu = prev_cpu;
	}

	for_each_domain(cpu, tmp) {
		/*
		 * If power savings logic is enabled for a domain, see if we
		 * are not overloaded, if so, don't balance wider.
		 */
		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
			unsigned long power = 0;
			unsigned long nr_running = 0;
			unsigned long capacity;
			int i;

			for_each_cpu(i, sched_domain_span(tmp)) {
				power += power_of(i);
				nr_running += cpu_rq(i)->cfs.nr_running;
			}

			capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);

			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
				nr_running /= 2;

			if (nr_running < capacity)
				want_sd = 0;
		}

		/*
		 * While iterating the domains looking for a spanning
		 * WAKE_AFFINE domain, adjust the affine target to any idle cpu
		 * in cache sharing domains along the way.
		 */
		if (want_affine) {
			int target = -1;

			/*
			 * If both cpu and prev_cpu are part of this domain,
			 * cpu is a valid SD_WAKE_AFFINE target.
			 */
			if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
				target = cpu;

			/*
			 * If there's an idle sibling in this domain, make that
			 * the wake_affine target instead of the current cpu.
			 */
			if (tmp->flags & SD_PREFER_SIBLING)
				target = select_idle_sibling(p, tmp, target);

			if (target >= 0) {
				if (tmp->flags & SD_WAKE_AFFINE) {
					affine_sd = tmp;
					want_affine = 0;
				}
				cpu = target;
			}
		}

		if (!want_sd && !want_affine)
			break;

		if (!(tmp->flags & sd_flag))
			continue;

		if (want_sd)
			sd = tmp;
	}

	if (sched_feat(LB_SHARES_UPDATE)) {
		/*
		 * Pick the largest domain to update shares over
		 */
		tmp = sd;
		if (affine_sd && (!tmp ||
				  cpumask_weight(sched_domain_span(affine_sd)) >
				  cpumask_weight(sched_domain_span(sd))))
			tmp = affine_sd;

		if (tmp)
			update_shares(tmp);
	}

	if (affine_sd && wake_affine(affine_sd, p, sync))
		return cpu;

	while (sd) {
		int load_idx = sd->forkexec_idx;
		struct sched_group *group;
		int weight;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;
			continue;
		}

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = cpumask_weight(sched_domain_span(sd));
		sd = NULL;
		for_each_domain(cpu, tmp) {
			if (weight <= cpumask_weight(sched_domain_span(tmp)))
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}

	return new_cpu;
}
#endif /* CONFIG_SMP */

/*
 * Adaptive granularity
 *
 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
 * with the limit of wakeup_gran -- when it never does a wakeup.
 *
 * So the smaller avg_wakeup is the faster we want this task to preempt,
 * but we don't want to treat the preemptee unfairly and therefore allow it
 * to run for at least the amount of time we'd like to run.
 *
 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
 *
 * NOTE: we use *nr_running to scale with load, this nicely matches the
 * degrading latency on load.
 */
static unsigned long
adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
{
	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
	u64 gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
}

static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
		gran = adaptive_gran(curr, se);

	/*
	 * Since it's curr running now, convert the gran from real-time
	 * to virtual-time in its units.
	 */
	if (sched_feat(ASYM_GRAN)) {
		/*
		 * By using 'se' instead of 'curr' we penalize light tasks, so
		 * they get preempted easier. That is, if 'se' < 'curr' then
		 * the resulting gran will be larger, therefore penalizing the
		 * lighter, if otoh 'se' > 'curr' then the resulting gran will
		 * be smaller, again penalizing the lighter task.
		 *
		 * This is especially important for buddies when the leftmost
		 * task is higher priority than the buddy.
		 */
		if (unlikely(se->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, se);
	} else {
		if (unlikely(curr->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, curr);
	}

	return gran;
}
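
/*
 * Worked example (illustrative, assuming gran = 1ms and ASYM_GRAN): a
 * waking entity of weight 512 sees gran scaled to 1ms * 1024/512 = 2ms,
 * so it must lead curr by more than 2ms of vruntime to preempt; a
 * heavier waker of weight 2048 only needs a lead of 0.5ms.
 */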

/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}

static void set_last_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->last = se;
	}
}

static void set_next_buddy(struct sched_entity *se)
{
	if (likely(task_of(se)->policy != SCHED_IDLE)) {
		for_each_sched_entity(se)
			cfs_rq_of(se)->next = se;
	}
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int sync = wake_flags & WF_SYNC;
	int scale = cfs_rq->nr_running >= sched_nr_latency;

	if (unlikely(rt_prio(p->prio)))
		goto preempt;

	if (unlikely(p->sched_class != &fair_sched_class))
		return;

	if (unlikely(se == pse))
		return;

	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
		set_next_buddy(pse);

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/*
	 * Batch and idle tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	/* Idle tasks are by definition preempted by everybody. */
	if (unlikely(curr->policy == SCHED_IDLE))
		goto preempt;

	if (sched_feat(WAKEUP_SYNC) && sync)
		goto preempt;

	if (sched_feat(WAKEUP_OVERLAP) &&
			se->avg_overlap < sysctl_sched_migration_cost &&
			pse->avg_overlap < sysctl_sched_migration_cost)
		goto preempt;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	update_curr(cfs_rq);
	find_matching_se(&se, &pse);
	BUG_ON(!pse);
	if (wakeup_preempt_entity(se, pse) == 1)
		goto preempt;

	return;

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class,
	 * for obvious reasons it's a bad idea to schedule back to it.
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
	struct task_struct *p = NULL;
	struct sched_entity *se;

	if (next == &cfs_rq->tasks)
		return NULL;

	se = list_entry(next, struct sched_entity, group_node);
	p = task_of(se);
	cfs_rq->balance_iterator = next->next;

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}

static unsigned long
__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move, struct sched_domain *sd,
		enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
		struct cfs_rq *cfs_rq)
{
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;
	cfs_rq_iterator.arg = cfs_rq;

	return balance_tasks(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &cfs_rq_iterator);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	long rem_load_move = max_load_move;
	int busiest_cpu = cpu_of(busiest);
	struct task_group *tg;

	rcu_read_lock();
	update_h_load(busiest_cpu);

	list_for_each_entry_rcu(tg, &task_groups, list) {
		struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/*
		 * empty group
		 */
		if (!busiest_cfs_rq->task_weight)
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
				rem_load, sd, idle, all_pinned, this_best_prio,
				tg->cfs_rq[busiest_cpu]);

		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
#else
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return __load_balance_fair(this_rq, this_cpu, busiest,
			max_load_move, sd, idle, all_pinned,
			this_best_prio, &busiest->cfs);
}
#endif
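
/*
 * [Editor's note] The group-scheduling branch above converts the move
 * budget between two scales: rem_load_move is global, while tasks in a
 * group are weighed locally, so the budget is scaled by
 * weight / (h_load + 1) on the way in and by h_load / (weight + 1) on
 * the way out (the +1 in each divisor guards against division by zero).
 * A standalone arithmetic sketch with made-up numbers, guarded with #if 0:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rem_load_move = 1024;	/* global budget */
	uint64_t weight = 2048;		/* busiest_cfs_rq->load.weight */
	uint64_t h_load = 512;		/* busiest_cfs_rq->h_load */

	/* budget expressed in the group's local units */
	uint64_t rem_load = rem_load_move * weight / (h_load + 1);

	/* suppose the balancer managed to move half of that */
	uint64_t moved = rem_load / 2;

	/* convert back to global units before charging the budget */
	uint64_t moved_load = moved * h_load / (weight + 1);

	printf("local budget %llu, moved %llu, charged %llu\n",
	       (unsigned long long)rem_load,
	       (unsigned long long)moved,
	       (unsigned long long)moved_load);
	return 0;
}
#endif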

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}

static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();
}

#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(current);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	spin_lock_irqsave(&rq->lock, flags);

	if (unlikely(task_cpu(p) != this_cpu))
		__set_task_cpu(p, this_cpu);

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	spin_unlock_irqrestore(&rq->lock, flags);
}
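
/*
 * [Editor's note] CFS always runs the entity with the smallest vruntime,
 * so the swap() above is all it takes to honour child_runs_first: if the
 * parent would still sort before the child, exchanging the two keys
 * reverses that order. A tiny standalone model, guarded with #if 0
 * (the swap macro mimics the kernel's; the values are made up):
 */
#if 0
#include <stdio.h>

#define swap(a, b) do { typeof(a) __t = (a); (a) = (b); (b) = __t; } while (0)

int main(void)
{
	unsigned long long parent_vr = 1000;	/* curr->vruntime */
	unsigned long long child_vr  = 1250;	/* se->vruntime after placement */

	if (parent_vr < child_vr)		/* entity_before(curr, se) */
		swap(parent_vr, child_vr);

	/* child now owns the smaller key and will be picked first */
	printf("child %llu, parent %llu\n", child_vr, parent_vr);
	return 0;
}
#endif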

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, &p->se, 1);
}
#endif

static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}
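
/*
 * [Editor's note] To first order, sched_slice() hands an entity
 * latency * (weight / total runqueue weight); get_rr_interval_fair()
 * merely converts that to jiffies. A back-of-the-envelope sketch with
 * made-up numbers, guarded with #if 0 (the real sched_slice() also
 * stretches the period once nr_running exceeds sched_nr_latency, which
 * this model omits):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long latency_ns = 5000000ULL;	/* sysctl_sched_latency */
	unsigned long weight = 1024;		/* one nice-0 entity */
	unsigned long total_weight = 3 * 1024;	/* three nice-0 tasks queued */

	unsigned long long slice_ns = latency_ns * weight / total_weight;

	/* ~1666666 ns, i.e. about 1 jiffy at HZ=1000 */
	printf("slice: %llu ns\n", slice_ns);
	return 0;
}
#endif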

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.moved_group		= moved_group_fair,
#endif
};
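
/*
 * [Editor's note] fair_sched_class is a statically initialized method
 * table: the core scheduler walks the ->next chain of classes in
 * priority order and dispatches through these pointers without ever
 * naming a policy directly. The same pattern in miniature, guarded
 * with #if 0 (toy_class, toy_fair, toy_idle are invented names):
 */
#if 0
#include <stdio.h>

struct toy_class {
	const struct toy_class *next;	/* next class, in priority order */
	const char *(*pick)(void);	/* stand-in for pick_next_task */
};

static const char *fair_pick(void) { return "fair task"; }
static const char *idle_pick(void) { return "idle task"; }

static const struct toy_class toy_idle = { .next = NULL,      .pick = idle_pick };
static const struct toy_class toy_fair = { .next = &toy_idle, .pick = fair_pick };

int main(void)
{
	const struct toy_class *class;

	/* the core's dispatch loop: ask each class in turn */
	for (class = &toy_fair; class; class = class->next)
		printf("%s\n", class->pick());
	return 0;
}
#endif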

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif