/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

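/*
 * Worked example (illustrative only): with SCHED_TUNABLESCALING_LOG the
 * effective tunables are the normalized values below multiplied by
 * factor = 1 + ilog(ncpus). On an 8-CPU machine factor = 1 + ilog(8) = 4,
 * so a normalized latency of 6ms becomes an effective sysctl_sched_latency
 * of 24ms, while SCHED_TUNABLESCALING_NONE would leave it at 6ms and
 * SCHED_TUNABLESCALING_LINEAR would scale it to 48ms.
 */
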
/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 2000000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;

/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 3;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/*
 * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu').
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities that
	 * are in the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find ancestors that are siblings
	 * under a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

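/*
 * Illustrative example (assumed cgroup layout, not actual kernel code):
 * with task groups /A and /A/B, a task t1 in /A/B and a task t2 in the
 * root group have the entity chains
 *
 *	&t1->se -> B->se[cpu] -> A->se[cpu] -> NULL
 *	&t2->se -> NULL
 *
 * find_matching_se() first walks t1's entity up two levels so both are at
 * the same depth, then walks both up until is_same_group() holds, leaving
 * A's entity and t2's entity, which sit on the same (root) cfs_rq and can
 * therefore be compared by vruntime.
 */
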
#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

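/*
 * Worked example (illustrative only): vruntime is an unsigned 64-bit
 * nanosecond counter, so the helpers above compare the signed difference
 * to stay correct across wraparound. If min_vruntime has wrapped around to
 * 10 while a long-sleeping entity still carries vruntime = ULLONG_MAX - 5,
 * then
 *
 *	(s64)(vruntime - min_vruntime) = -16 < 0
 *
 * so min_vruntime()/entity_before() still treat the un-wrapped value as the
 * smaller (earlier) one, which a plain u64 '<' comparison would get wrong.
 */
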

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
	WRT_SYSCTL(sched_shares_ratelimit);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

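/*
 * Worked example (illustrative numbers only, using the defaults above on a
 * single CPU, i.e. no ilog(ncpus) scaling): with sched_nr_latency = 3 and
 * sysctl_sched_latency = 6ms,
 *
 *	nr_running = 3  ->  period = 6ms,  equal-weight slice = 2ms each
 *	nr_running = 8  ->  period = 8 * sysctl_sched_min_granularity = 16ms
 *
 * sched_slice() then hands each entity a share of that period in proportion
 * to its load weight, and sched_vslice() converts that wall-time slice back
 * into vruntime (weight-normalized time) via calc_delta_fair().
 */
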
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}
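/*
 * Worked example (illustrative only): vruntime advances at a rate scaled by
 * NICE_0_LOAD / weight. A nice-0 task (weight 1024) that runs for 1ms of
 * wall time accrues 1ms of vruntime, while a heavier nice -5 task
 * (weight ~3121) accrues only ~1ms * 1024/3121 ~= 0.33ms. Always picking
 * the smallest vruntime therefore gives the heavier task a proportionally
 * larger share of the CPU while keeping everyone's vruntime advancing at
 * roughly the same pace.
 */
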
548
549static inline void
5870db5b 550update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 551{
41acab88 552 schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
bf0f6f24
IM
553}
554
bf0f6f24
IM
555/*
556 * Task is being enqueued - update stats:
557 */
d2417e5a 558static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 559{
bf0f6f24
IM
560 /*
561 * Are we enqueueing a waiting task? (for current tasks
562 * a dequeue/enqueue event is a NOP)
563 */
429d43bc 564 if (se != cfs_rq->curr)
5870db5b 565 update_stats_wait_start(cfs_rq, se);
bf0f6f24
IM
566}
567
bf0f6f24 568static void
9ef0a961 569update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 570{
41acab88
LDM
571 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
572 rq_of(cfs_rq)->clock - se->statistics.wait_start));
573 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
574 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
575 rq_of(cfs_rq)->clock - se->statistics.wait_start);
768d0c27
PZ
576#ifdef CONFIG_SCHEDSTATS
577 if (entity_is_task(se)) {
578 trace_sched_stat_wait(task_of(se),
41acab88 579 rq_of(cfs_rq)->clock - se->statistics.wait_start);
768d0c27
PZ
580 }
581#endif
41acab88 582 schedstat_set(se->statistics.wait_start, 0);
bf0f6f24
IM
583}
584
585static inline void
19b6a2e3 586update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 587{
bf0f6f24
IM
588 /*
589 * Mark the end of the wait period if dequeueing a
590 * waiting task:
591 */
429d43bc 592 if (se != cfs_rq->curr)
9ef0a961 593 update_stats_wait_end(cfs_rq, se);
bf0f6f24
IM
594}
595
596/*
597 * We are picking a new current task - update its stats:
598 */
599static inline void
79303e9e 600update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
601{
602 /*
603 * We are starting a new run period:
604 */
d281918d 605 se->exec_start = rq_of(cfs_rq)->clock;
bf0f6f24
IM
606}
607
bf0f6f24
IM
608/**************************************************
609 * Scheduling class queueing methods:
610 */
611
c09595f6
PZ
612#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
613static void
614add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
615{
616 cfs_rq->task_weight += weight;
617}
618#else
619static inline void
620add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
621{
622}
623#endif
624
30cfdcfc
DA
625static void
626account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
627{
628 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6
PZ
629 if (!parent_entity(se))
630 inc_cpu_load(rq_of(cfs_rq), se->load.weight);
b87f1724 631 if (entity_is_task(se)) {
c09595f6 632 add_cfs_task_weight(cfs_rq, se->load.weight);
b87f1724
BR
633 list_add(&se->group_node, &cfs_rq->tasks);
634 }
30cfdcfc
DA
635 cfs_rq->nr_running++;
636 se->on_rq = 1;
637}
638
639static void
640account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
641{
642 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6
PZ
643 if (!parent_entity(se))
644 dec_cpu_load(rq_of(cfs_rq), se->load.weight);
b87f1724 645 if (entity_is_task(se)) {
c09595f6 646 add_cfs_task_weight(cfs_rq, -se->load.weight);
b87f1724
BR
647 list_del_init(&se->group_node);
648 }
30cfdcfc
DA
649 cfs_rq->nr_running--;
650 se->on_rq = 0;
651}
652
2396af69 653static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 654{
bf0f6f24 655#ifdef CONFIG_SCHEDSTATS
e414314c
PZ
656 struct task_struct *tsk = NULL;
657
658 if (entity_is_task(se))
659 tsk = task_of(se);
660
41acab88
LDM
661 if (se->statistics.sleep_start) {
662 u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
bf0f6f24
IM
663
664 if ((s64)delta < 0)
665 delta = 0;
666
41acab88
LDM
667 if (unlikely(delta > se->statistics.sleep_max))
668 se->statistics.sleep_max = delta;
bf0f6f24 669
41acab88
LDM
670 se->statistics.sleep_start = 0;
671 se->statistics.sum_sleep_runtime += delta;
9745512c 672
768d0c27 673 if (tsk) {
e414314c 674 account_scheduler_latency(tsk, delta >> 10, 1);
768d0c27
PZ
675 trace_sched_stat_sleep(tsk, delta);
676 }
bf0f6f24 677 }
41acab88
LDM
678 if (se->statistics.block_start) {
679 u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
bf0f6f24
IM
680
681 if ((s64)delta < 0)
682 delta = 0;
683
41acab88
LDM
684 if (unlikely(delta > se->statistics.block_max))
685 se->statistics.block_max = delta;
bf0f6f24 686
41acab88
LDM
687 se->statistics.block_start = 0;
688 se->statistics.sum_sleep_runtime += delta;
30084fbd 689
e414314c 690 if (tsk) {
8f0dfc34 691 if (tsk->in_iowait) {
41acab88
LDM
692 se->statistics.iowait_sum += delta;
693 se->statistics.iowait_count++;
768d0c27 694 trace_sched_stat_iowait(tsk, delta);
8f0dfc34
AV
695 }
696
e414314c
PZ
697 /*
698 * Blocking time is in units of nanosecs, so shift by
699 * 20 to get a milliseconds-range estimation of the
700 * amount of time that the task spent sleeping:
701 */
702 if (unlikely(prof_on == SLEEP_PROFILING)) {
703 profile_hits(SLEEP_PROFILING,
704 (void *)get_wchan(tsk),
705 delta >> 20);
706 }
707 account_scheduler_latency(tsk, delta >> 10, 0);
30084fbd 708 }
bf0f6f24
IM
709 }
710#endif
711}
712
ddc97297
PZ
713static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
714{
715#ifdef CONFIG_SCHED_DEBUG
716 s64 d = se->vruntime - cfs_rq->min_vruntime;
717
718 if (d < 0)
719 d = -d;
720
721 if (d > 3*sysctl_sched_latency)
722 schedstat_inc(cfs_rq, nr_spread_over);
723#endif
724}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}

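/*
 * Worked example (illustrative numbers only): assume min_vruntime = 100ms
 * and the 6ms/2ms defaults above. A newly forked task placed with
 * START_DEBIT gets its vruntime pushed one vslice past min_vruntime (a
 * couple of milliseconds here), so it cannot immediately preempt the tasks
 * already promised the current period. A task waking from sleep is instead
 * placed at 100ms - 3ms = 97ms (half a latency with GENTLE_FAIR_SLEEPERS),
 * a modest wakeup bonus, while max_vruntime() ensures a task that only
 * slept briefly keeps its own larger vruntime and never gains time by
 * sleeping.
 */
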
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/*
	 * Update the normalized vruntime before updating min_vruntime
	 * through calling update_curr().
	 */
	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
		se->vruntime += cfs_rq->min_vruntime;

	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	account_entity_enqueue(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
}
786
a571bbea 787static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2002c695 788{
de69a80b 789 if (!se || cfs_rq->last == se)
2002c695
PZ
790 cfs_rq->last = NULL;
791
de69a80b 792 if (!se || cfs_rq->next == se)
2002c695
PZ
793 cfs_rq->next = NULL;
794}
795
a571bbea
PZ
796static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
797{
798 for_each_sched_entity(se)
799 __clear_buddies(cfs_rq_of(se), se);
800}
801
bf0f6f24 802static void
371fd7e7 803dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 804{
a2a2d680
DA
805 /*
806 * Update run-time statistics of the 'current'.
807 */
808 update_curr(cfs_rq);
809
19b6a2e3 810 update_stats_dequeue(cfs_rq, se);
371fd7e7 811 if (flags & DEQUEUE_SLEEP) {
67e9fb2a 812#ifdef CONFIG_SCHEDSTATS
bf0f6f24
IM
813 if (entity_is_task(se)) {
814 struct task_struct *tsk = task_of(se);
815
816 if (tsk->state & TASK_INTERRUPTIBLE)
41acab88 817 se->statistics.sleep_start = rq_of(cfs_rq)->clock;
bf0f6f24 818 if (tsk->state & TASK_UNINTERRUPTIBLE)
41acab88 819 se->statistics.block_start = rq_of(cfs_rq)->clock;
bf0f6f24 820 }
db36cc7d 821#endif
67e9fb2a
PZ
822 }
823
2002c695 824 clear_buddies(cfs_rq, se);
4793241b 825
83b699ed 826 if (se != cfs_rq->curr)
30cfdcfc
DA
827 __dequeue_entity(cfs_rq, se);
828 account_entity_dequeue(cfs_rq, se);
1af5f730 829 update_min_vruntime(cfs_rq);
88ec22d3
PZ
830
831 /*
832 * Normalize the entity after updating the min_vruntime because the
833 * update can refer to the ->curr item and we need to reflect this
834 * movement in our normalized position.
835 */
371fd7e7 836 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 837 se->vruntime -= cfs_rq->min_vruntime;
bf0f6f24
IM
838}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (delta_exec < sysctl_sched_min_granularity)
		return;

	if (cfs_rq->nr_running > 1) {
		struct sched_entity *se = __pick_next_entity(cfs_rq);
		s64 delta = curr->vruntime - se->vruntime;

		if (delta > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
}
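/*
 * Worked example (illustrative numbers only): with an ideal slice of 2ms,
 * a task that has run 2.5ms since it was last scheduled in is rescheduled
 * immediately. Otherwise, once it has run at least
 * sysctl_sched_min_granularity, it is also preempted early if the leftmost
 * waiter's vruntime already trails curr's by more than the ideal slice, so
 * a wakeup that narrowly lost the preemption race does not have to sit out
 * the rest of a full slice.
 */
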
879
83b699ed 880static void
8494f412 881set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 882{
83b699ed
SV
883 /* 'current' is not kept within the tree. */
884 if (se->on_rq) {
885 /*
886 * Any task has to be enqueued before it get to execute on
887 * a CPU. So account for the time it spent waiting on the
888 * runqueue.
889 */
890 update_stats_wait_end(cfs_rq, se);
891 __dequeue_entity(cfs_rq, se);
892 }
893
79303e9e 894 update_stats_curr_start(cfs_rq, se);
429d43bc 895 cfs_rq->curr = se;
eba1ed4b
IM
896#ifdef CONFIG_SCHEDSTATS
897 /*
898 * Track our maximum slice length, if the CPU's load is at
899 * least twice that of our own weight (i.e. dont track it
900 * when there are only lesser-weight tasks around):
901 */
495eca49 902 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
41acab88 903 se->statistics.slice_max = max(se->statistics.slice_max,
eba1ed4b
IM
904 se->sum_exec_runtime - se->prev_sum_exec_runtime);
905 }
906#endif
4a55b450 907 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
908}
909
3f3a4904
PZ
910static int
911wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
912
f4b6755f 913static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
aa2ac252 914{
f4b6755f 915 struct sched_entity *se = __pick_next_entity(cfs_rq);
f685ceac 916 struct sched_entity *left = se;
f4b6755f 917
f685ceac
MG
918 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
919 se = cfs_rq->next;
aa2ac252 920
f685ceac
MG
921 /*
922 * Prefer last buddy, try to return the CPU to a preempted task.
923 */
924 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
925 se = cfs_rq->last;
926
927 clear_buddies(cfs_rq, se);
4793241b
PZ
928
929 return se;
aa2ac252
PZ
930}
931
ab6cde26 932static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
933{
934 /*
935 * If still on the runqueue then deactivate_task()
936 * was not called and update_curr() has to be done:
937 */
938 if (prev->on_rq)
b7cc0896 939 update_curr(cfs_rq);
bf0f6f24 940
ddc97297 941 check_spread(cfs_rq, prev);
30cfdcfc 942 if (prev->on_rq) {
5870db5b 943 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
944 /* Put 'current' back into the tree. */
945 __enqueue_entity(cfs_rq, prev);
946 }
429d43bc 947 cfs_rq->curr = NULL;
bf0f6f24
IM
948}
949
8f4d37ec
PZ
950static void
951entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 952{
bf0f6f24 953 /*
30cfdcfc 954 * Update run-time statistics of the 'current'.
bf0f6f24 955 */
30cfdcfc 956 update_curr(cfs_rq);
bf0f6f24 957
8f4d37ec
PZ
958#ifdef CONFIG_SCHED_HRTICK
959 /*
960 * queued ticks are scheduled to match the slice, so don't bother
961 * validating it and just reschedule.
962 */
983ed7a6
HH
963 if (queued) {
964 resched_task(rq_of(cfs_rq)->curr);
965 return;
966 }
8f4d37ec
PZ
967 /*
968 * don't let the period tick interfere with the hrtick preemption
969 */
970 if (!sched_feat(DOUBLE_TICK) &&
971 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
972 return;
973#endif
974
ce6c1311 975 if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
2e09bf55 976 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
977}
978
979/**************************************************
980 * CFS operations on tasks:
981 */
982
8f4d37ec
PZ
983#ifdef CONFIG_SCHED_HRTICK
984static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
985{
8f4d37ec
PZ
986 struct sched_entity *se = &p->se;
987 struct cfs_rq *cfs_rq = cfs_rq_of(se);
988
989 WARN_ON(task_rq(p) != rq);
990
991 if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
992 u64 slice = sched_slice(cfs_rq, se);
993 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
994 s64 delta = slice - ran;
995
996 if (delta < 0) {
997 if (rq->curr == p)
998 resched_task(p);
999 return;
1000 }
1001
1002 /*
1003 * Don't schedule slices shorter than 10000ns, that just
1004 * doesn't make sense. Rely on vruntime for fairness.
1005 */
31656519 1006 if (rq->curr != p)
157124c1 1007 delta = max_t(s64, 10000LL, delta);
8f4d37ec 1008
31656519 1009 hrtick_start(rq, delta);
8f4d37ec
PZ
1010 }
1011}
a4c2f00f
PZ
1012
1013/*
1014 * called from enqueue/dequeue and updates the hrtick when the
1015 * current task is from our class and nr_running is low enough
1016 * to matter.
1017 */
1018static void hrtick_update(struct rq *rq)
1019{
1020 struct task_struct *curr = rq->curr;
1021
1022 if (curr->sched_class != &fair_sched_class)
1023 return;
1024
1025 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
1026 hrtick_start_fair(rq, curr);
1027}
55e12e5e 1028#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
1029static inline void
1030hrtick_start_fair(struct rq *rq, struct task_struct *p)
1031{
1032}
a4c2f00f
PZ
1033
1034static inline void hrtick_update(struct rq *rq)
1035{
1036}
8f4d37ec
PZ
1037#endif
1038
bf0f6f24
IM
1039/*
1040 * The enqueue_task method is called before nr_running is
1041 * increased. Here we update the fair scheduling stats and
1042 * then put the task into the rbtree:
1043 */
ea87bb78 1044static void
371fd7e7 1045enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
1046{
1047 struct cfs_rq *cfs_rq;
62fb1851 1048 struct sched_entity *se = &p->se;
bf0f6f24
IM
1049
1050 for_each_sched_entity(se) {
62fb1851 1051 if (se->on_rq)
bf0f6f24
IM
1052 break;
1053 cfs_rq = cfs_rq_of(se);
88ec22d3
PZ
1054 enqueue_entity(cfs_rq, se, flags);
1055 flags = ENQUEUE_WAKEUP;
bf0f6f24 1056 }
8f4d37ec 1057
a4c2f00f 1058 hrtick_update(rq);
bf0f6f24
IM
1059}
1060
1061/*
1062 * The dequeue_task method is called before nr_running is
1063 * decreased. We remove the task from the rbtree and
1064 * update the fair scheduling stats:
1065 */
371fd7e7 1066static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
1067{
1068 struct cfs_rq *cfs_rq;
62fb1851 1069 struct sched_entity *se = &p->se;
bf0f6f24
IM
1070
1071 for_each_sched_entity(se) {
1072 cfs_rq = cfs_rq_of(se);
371fd7e7 1073 dequeue_entity(cfs_rq, se, flags);
bf0f6f24 1074 /* Don't dequeue parent if it has other entities besides us */
62fb1851 1075 if (cfs_rq->load.weight)
bf0f6f24 1076 break;
371fd7e7 1077 flags |= DEQUEUE_SLEEP;
bf0f6f24 1078 }
8f4d37ec 1079
a4c2f00f 1080 hrtick_update(rq);
bf0f6f24
IM
1081}
1082
1083/*
1799e35d
IM
1084 * sched_yield() support is very simple - we dequeue and enqueue.
1085 *
1086 * If compat_yield is turned on then we requeue to the end of the tree.
bf0f6f24 1087 */
4530d7ab 1088static void yield_task_fair(struct rq *rq)
bf0f6f24 1089{
db292ca3
IM
1090 struct task_struct *curr = rq->curr;
1091 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1092 struct sched_entity *rightmost, *se = &curr->se;
bf0f6f24
IM
1093
1094 /*
1799e35d
IM
1095 * Are we the only task in the tree?
1096 */
1097 if (unlikely(cfs_rq->nr_running == 1))
1098 return;
1099
2002c695
PZ
1100 clear_buddies(cfs_rq, se);
1101
db292ca3 1102 if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
3e51f33f 1103 update_rq_clock(rq);
1799e35d 1104 /*
a2a2d680 1105 * Update run-time statistics of the 'current'.
1799e35d 1106 */
2b1e315d 1107 update_curr(cfs_rq);
1799e35d
IM
1108
1109 return;
1110 }
1111 /*
1112 * Find the rightmost entry in the rbtree:
bf0f6f24 1113 */
2b1e315d 1114 rightmost = __pick_last_entity(cfs_rq);
1799e35d
IM
1115 /*
1116 * Already in the rightmost position?
1117 */
54fdc581 1118 if (unlikely(!rightmost || entity_before(rightmost, se)))
1799e35d
IM
1119 return;
1120
1121 /*
1122 * Minimally necessary key value to be last in the tree:
2b1e315d
DA
1123 * Upon rescheduling, sched_class::put_prev_task() will place
1124 * 'current' within the tree based on its new key value.
1799e35d 1125 */
30cfdcfc 1126 se->vruntime = rightmost->vruntime + 1;
bf0f6f24
IM
1127}
1128
e7693a36 1129#ifdef CONFIG_SMP
098fb9db 1130
88ec22d3
PZ
1131static void task_waking_fair(struct rq *rq, struct task_struct *p)
1132{
1133 struct sched_entity *se = &p->se;
1134 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1135
1136 se->vruntime -= cfs_rq->min_vruntime;
1137}
1138
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group.
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * The problem is that perfectly aligning the shares is rather expensive, hence
 * we try to avoid doing that too often - see update_shares(), which ratelimits
 * this change.
 *
 * We compensate for this by not only taking the current delta into account,
 * but also the delta between when the shares were last adjusted and now.
 *
 * We still saw a performance dip; tracing showed that between cgroup:/ and
 * cgroup:/foo balancing the number of affine wakeups increased significantly.
 * Therefore try to bias the error in the direction of failing the affine
 * wakeup.
 */
f1d239f7
PZ
1161static long effective_load(struct task_group *tg, int cpu,
1162 long wl, long wg)
bb3469ac 1163{
4be9daaa 1164 struct sched_entity *se = tg->se[cpu];
f1d239f7
PZ
1165
1166 if (!tg->parent)
1167 return wl;
1168
f5bfb7d9
PZ
1169 /*
1170 * By not taking the decrease of shares on the other cpu into
1171 * account our error leans towards reducing the affine wakeups.
1172 */
1173 if (!wl && sched_feat(ASYM_EFF_LOAD))
1174 return wl;
1175
4be9daaa 1176 for_each_sched_entity(se) {
cb5ef42a 1177 long S, rw, s, a, b;
940959e9
PZ
1178 long more_w;
1179
1180 /*
1181 * Instead of using this increment, also add the difference
1182 * between when the shares were last updated and now.
1183 */
1184 more_w = se->my_q->load.weight - se->my_q->rq_weight;
1185 wl += more_w;
1186 wg += more_w;
4be9daaa
PZ
1187
1188 S = se->my_q->tg->shares;
1189 s = se->my_q->shares;
f1d239f7 1190 rw = se->my_q->rq_weight;
bb3469ac 1191
cb5ef42a
PZ
1192 a = S*(rw + wl);
1193 b = S*rw + s*wg;
4be9daaa 1194
940959e9
PZ
1195 wl = s*(a-b);
1196
1197 if (likely(b))
1198 wl /= b;
1199
83378269
PZ
1200 /*
1201 * Assume the group is already running and will
1202 * thus already be accounted for in the weight.
1203 *
1204 * That is, moving shares between CPUs, does not
1205 * alter the group weight.
1206 */
4be9daaa 1207 wg = 0;
4be9daaa 1208 }
bb3469ac 1209
4be9daaa 1210 return wl;
bb3469ac 1211}
4be9daaa 1212
bb3469ac 1213#else
4be9daaa 1214
83378269
PZ
1215static inline unsigned long effective_load(struct task_group *tg, int cpu,
1216 unsigned long wl, unsigned long wg)
4be9daaa 1217{
83378269 1218 return wl;
bb3469ac 1219}
4be9daaa 1220
bb3469ac
PZ
1221#endif
1222
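/*
 * Worked example (illustrative numbers only, assuming the shares are up to
 * date so more_w == 0): consider a group with tg->shares S = 1024 whose
 * cfs_rq on this cpu holds shares s = 512 against rq_weight rw = 2048.
 * Waking a weight-1024 task here (wl = wg = 1024) gives, for this level of
 * the hierarchy,
 *
 *	a = S*(rw + wl) = 1024 * 3072 = 3145728
 *	b = S*rw + s*wg = 1024*2048 + 512*1024 = 2621440
 *	wl = s*(a-b)/b  = 512 * 524288 / 2621440 ~= 102
 *
 * i.e. the root sees only ~102 units of extra load rather than the task's
 * full 1024, because most of the new weight is absorbed by reshuffling the
 * group's shares; the loop then propagates this reduced delta upwards.
 */
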
c88d5910 1223static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
098fb9db 1224{
c88d5910
PZ
1225 unsigned long this_load, load;
1226 int idx, this_cpu, prev_cpu;
098fb9db 1227 unsigned long tl_per_task;
c88d5910 1228 struct task_group *tg;
83378269 1229 unsigned long weight;
b3137bc8 1230 int balanced;
098fb9db 1231
c88d5910
PZ
1232 idx = sd->wake_idx;
1233 this_cpu = smp_processor_id();
1234 prev_cpu = task_cpu(p);
1235 load = source_load(prev_cpu, idx);
1236 this_load = target_load(this_cpu, idx);
098fb9db 1237
b3137bc8
MG
1238 /*
1239 * If sync wakeup then subtract the (maximum possible)
1240 * effect of the currently running task from the load
1241 * of the current CPU:
1242 */
f3b577de 1243 rcu_read_lock();
83378269
PZ
1244 if (sync) {
1245 tg = task_group(current);
1246 weight = current->se.load.weight;
1247
c88d5910 1248 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
1249 load += effective_load(tg, prev_cpu, 0, -weight);
1250 }
b3137bc8 1251
83378269
PZ
1252 tg = task_group(p);
1253 weight = p->se.load.weight;
b3137bc8 1254
71a29aa7
PZ
1255 /*
1256 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
1257 * due to the sync cause above having dropped this_load to 0, we'll
1258 * always have an imbalance, but there's really nothing you can do
1259 * about that, so that's good too.
71a29aa7
PZ
1260 *
1261 * Otherwise check if either cpus are near enough in load to allow this
1262 * task to be woken on this_cpu.
1263 */
e51fd5e2
PZ
1264 if (this_load) {
1265 unsigned long this_eff_load, prev_eff_load;
1266
1267 this_eff_load = 100;
1268 this_eff_load *= power_of(prev_cpu);
1269 this_eff_load *= this_load +
1270 effective_load(tg, this_cpu, weight, weight);
1271
1272 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
1273 prev_eff_load *= power_of(this_cpu);
1274 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
1275
1276 balanced = this_eff_load <= prev_eff_load;
1277 } else
1278 balanced = true;
f3b577de 1279 rcu_read_unlock();
b3137bc8 1280
098fb9db 1281 /*
4ae7d5ce
IM
1282 * If the currently running task will sleep within
1283 * a reasonable amount of time then attract this newly
1284 * woken task:
098fb9db 1285 */
2fb7635c
PZ
1286 if (sync && balanced)
1287 return 1;
098fb9db 1288
41acab88 1289 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
098fb9db
IM
1290 tl_per_task = cpu_avg_load_per_task(this_cpu);
1291
c88d5910
PZ
1292 if (balanced ||
1293 (this_load <= load &&
1294 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
098fb9db
IM
1295 /*
1296 * This domain has SD_WAKE_AFFINE and
1297 * p is cache cold in this domain, and
1298 * there is no bad imbalance.
1299 */
c88d5910 1300 schedstat_inc(sd, ttwu_move_affine);
41acab88 1301 schedstat_inc(p, se.statistics.nr_wakeups_affine);
098fb9db
IM
1302
1303 return 1;
1304 }
1305 return 0;
1306}
1307
aaee1203
PZ
1308/*
1309 * find_idlest_group finds and returns the least busy CPU group within the
1310 * domain.
1311 */
1312static struct sched_group *
78e7ed53 1313find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5158f4e4 1314 int this_cpu, int load_idx)
e7693a36 1315{
aaee1203
PZ
1316 struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
1317 unsigned long min_load = ULONG_MAX, this_load = 0;
aaee1203 1318 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 1319
aaee1203
PZ
1320 do {
1321 unsigned long load, avg_load;
1322 int local_group;
1323 int i;
e7693a36 1324
aaee1203
PZ
1325 /* Skip over this group if it has no CPUs allowed */
1326 if (!cpumask_intersects(sched_group_cpus(group),
1327 &p->cpus_allowed))
1328 continue;
1329
1330 local_group = cpumask_test_cpu(this_cpu,
1331 sched_group_cpus(group));
1332
1333 /* Tally up the load of all CPUs in the group */
1334 avg_load = 0;
1335
1336 for_each_cpu(i, sched_group_cpus(group)) {
1337 /* Bias balancing toward cpus of our domain */
1338 if (local_group)
1339 load = source_load(i, load_idx);
1340 else
1341 load = target_load(i, load_idx);
1342
1343 avg_load += load;
1344 }
1345
1346 /* Adjust by relative CPU power of the group */
1347 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1348
1349 if (local_group) {
1350 this_load = avg_load;
1351 this = group;
1352 } else if (avg_load < min_load) {
1353 min_load = avg_load;
1354 idlest = group;
1355 }
1356 } while (group = group->next, group != sd->groups);
1357
1358 if (!idlest || 100*this_load < imbalance*min_load)
1359 return NULL;
1360 return idlest;
1361}
1362
1363/*
1364 * find_idlest_cpu - find the idlest cpu among the cpus in group.
1365 */
1366static int
1367find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1368{
1369 unsigned long load, min_load = ULONG_MAX;
1370 int idlest = -1;
1371 int i;
1372
1373 /* Traverse only the allowed CPUs */
1374 for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1375 load = weighted_cpuload(i);
1376
1377 if (load < min_load || (load == min_load && i == this_cpu)) {
1378 min_load = load;
1379 idlest = i;
e7693a36
GH
1380 }
1381 }
1382
aaee1203
PZ
1383 return idlest;
1384}
e7693a36 1385
a50bde51
PZ
1386/*
1387 * Try and locate an idle CPU in the sched_domain.
1388 */
99bd5e2f 1389static int select_idle_sibling(struct task_struct *p, int target)
a50bde51
PZ
1390{
1391 int cpu = smp_processor_id();
1392 int prev_cpu = task_cpu(p);
99bd5e2f 1393 struct sched_domain *sd;
a50bde51
PZ
1394 int i;
1395
1396 /*
99bd5e2f
SS
1397 * If the task is going to be woken-up on this cpu and if it is
1398 * already idle, then it is the right target.
a50bde51 1399 */
99bd5e2f
SS
1400 if (target == cpu && idle_cpu(cpu))
1401 return cpu;
1402
	/*
	 * If the task is going to be woken-up on the cpu where it previously
	 * ran and if it is currently idle, then it is the right target.
	 */
1407 if (target == prev_cpu && idle_cpu(prev_cpu))
fe3bcfe1 1408 return prev_cpu;
a50bde51
PZ
1409
	/*
	 * Otherwise, iterate the domains and find an eligible idle cpu.
	 */
99bd5e2f
SS
1413 for_each_domain(target, sd) {
1414 if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
fe3bcfe1 1415 break;
99bd5e2f
SS
1416
1417 for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
1418 if (idle_cpu(i)) {
1419 target = i;
1420 break;
1421 }
a50bde51 1422 }
99bd5e2f
SS
1423
		/*
		 * Let's stop looking for an idle sibling when we have reached
		 * the domain that spans both the current cpu and prev_cpu.
		 */
1428 if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
1429 cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
1430 break;
a50bde51
PZ
1431 }
1432
1433 return target;
1434}
1435
/*
 * select_task_rq_fair: balance the current task (running on cpu) in domains
 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
 * SD_BALANCE_FORK and SD_BALANCE_EXEC.
 *
 * Balance, i.e. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int
select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
aaee1203 1449{
29cd8bae 1450 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910
PZ
1451 int cpu = smp_processor_id();
1452 int prev_cpu = task_cpu(p);
1453 int new_cpu = cpu;
99bd5e2f 1454 int want_affine = 0;
29cd8bae 1455 int want_sd = 1;
5158f4e4 1456 int sync = wake_flags & WF_SYNC;
c88d5910 1457
0763a660 1458 if (sd_flag & SD_BALANCE_WAKE) {
beac4c7e 1459 if (cpumask_test_cpu(cpu, &p->cpus_allowed))
c88d5910
PZ
1460 want_affine = 1;
1461 new_cpu = prev_cpu;
1462 }
aaee1203
PZ
1463
1464 for_each_domain(cpu, tmp) {
e4f42888
PZ
1465 if (!(tmp->flags & SD_LOAD_BALANCE))
1466 continue;
1467
aaee1203 1468 /*
ae154be1
PZ
1469 * If power savings logic is enabled for a domain, see if we
1470 * are not overloaded, if so, don't balance wider.
aaee1203 1471 */
59abf026 1472 if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
ae154be1
PZ
1473 unsigned long power = 0;
1474 unsigned long nr_running = 0;
1475 unsigned long capacity;
1476 int i;
1477
1478 for_each_cpu(i, sched_domain_span(tmp)) {
1479 power += power_of(i);
1480 nr_running += cpu_rq(i)->cfs.nr_running;
1481 }
1482
1483 capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
1484
59abf026
PZ
1485 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1486 nr_running /= 2;
1487
1488 if (nr_running < capacity)
29cd8bae 1489 want_sd = 0;
ae154be1 1490 }
aaee1203 1491
fe3bcfe1 1492 /*
99bd5e2f
SS
1493 * If both cpu and prev_cpu are part of this domain,
1494 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 1495 */
99bd5e2f
SS
1496 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
1497 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
1498 affine_sd = tmp;
1499 want_affine = 0;
c88d5910
PZ
1500 }
1501
29cd8bae
PZ
1502 if (!want_sd && !want_affine)
1503 break;
1504
0763a660 1505 if (!(tmp->flags & sd_flag))
c88d5910
PZ
1506 continue;
1507
29cd8bae
PZ
1508 if (want_sd)
1509 sd = tmp;
1510 }
1511
8b911acd 1512#ifdef CONFIG_FAIR_GROUP_SCHED
29cd8bae
PZ
1513 if (sched_feat(LB_SHARES_UPDATE)) {
1514 /*
1515 * Pick the largest domain to update shares over
1516 */
1517 tmp = sd;
669c55e9 1518 if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
29cd8bae
PZ
1519 tmp = affine_sd;
1520
0017d735
PZ
1521 if (tmp) {
1522 raw_spin_unlock(&rq->lock);
29cd8bae 1523 update_shares(tmp);
0017d735
PZ
1524 raw_spin_lock(&rq->lock);
1525 }
c88d5910 1526 }
8b911acd 1527#endif
aaee1203 1528
8b911acd 1529 if (affine_sd) {
99bd5e2f
SS
1530 if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
1531 return select_idle_sibling(p, cpu);
1532 else
1533 return select_idle_sibling(p, prev_cpu);
8b911acd 1534 }
e7693a36 1535
aaee1203 1536 while (sd) {
5158f4e4 1537 int load_idx = sd->forkexec_idx;
aaee1203 1538 struct sched_group *group;
c88d5910 1539 int weight;
098fb9db 1540
0763a660 1541 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
1542 sd = sd->child;
1543 continue;
1544 }
098fb9db 1545
5158f4e4
PZ
1546 if (sd_flag & SD_BALANCE_WAKE)
1547 load_idx = sd->wake_idx;
098fb9db 1548
5158f4e4 1549 group = find_idlest_group(sd, p, cpu, load_idx);
aaee1203
PZ
1550 if (!group) {
1551 sd = sd->child;
1552 continue;
1553 }
4ae7d5ce 1554
d7c33c49 1555 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
1556 if (new_cpu == -1 || new_cpu == cpu) {
1557 /* Now try balancing at a lower domain level of cpu */
1558 sd = sd->child;
1559 continue;
e7693a36 1560 }
aaee1203
PZ
1561
1562 /* Now try balancing at a lower domain level of new_cpu */
1563 cpu = new_cpu;
669c55e9 1564 weight = sd->span_weight;
aaee1203
PZ
1565 sd = NULL;
1566 for_each_domain(cpu, tmp) {
669c55e9 1567 if (weight <= tmp->span_weight)
aaee1203 1568 break;
0763a660 1569 if (tmp->flags & sd_flag)
aaee1203
PZ
1570 sd = tmp;
1571 }
1572 /* while loop will break here if sd == NULL */
e7693a36
GH
1573 }
1574
c88d5910 1575 return new_cpu;
e7693a36
GH
1576}
1577#endif /* CONFIG_SMP */
1578
static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * Since it is curr that is running now, convert the gran from
	 * real-time to virtual-time in curr's units.
	 *
	 * By using 'se' instead of 'curr' we penalize light tasks, so
	 * they get preempted more easily. That is, if 'se' < 'curr' then
	 * the resulting gran will be larger, therefore penalizing the
	 * lighter task; if on the other hand 'se' > 'curr' then the
	 * resulting gran will be smaller, again penalizing the lighter
	 * task.
	 *
	 * This is especially important for buddies when the leftmost
	 * task is higher priority than the buddy.
	 */
	if (unlikely(se->load.weight != NICE_0_LOAD))
		gran = calc_delta_fair(gran, se);

	return gran;
}

/*
 * Should 'se' preempt 'curr'?
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 * w(c, s1) = -1
 * w(c, s2) =  0
 * w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}

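/*
 * Worked example (illustrative numbers only): with a 1ms wakeup granularity
 * and nice-0 weights, a waking task whose vruntime is 1.5ms behind curr's
 * (vdiff > gran, the s3 case above) returns 1 and may preempt immediately;
 * one only 0.3ms behind falls inside the granularity window (s2) and
 * returns 0; one whose vruntime is ahead of curr's (s1) returns -1 and
 * never preempts. A lighter 'se' gets its gran scaled up by
 * calc_delta_fair(), so it needs a proportionally larger lead before it is
 * allowed to preempt.
 */
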
02479099
PZ
1632static void set_last_buddy(struct sched_entity *se)
1633{
6bc912b7
PZ
1634 if (likely(task_of(se)->policy != SCHED_IDLE)) {
1635 for_each_sched_entity(se)
1636 cfs_rq_of(se)->last = se;
1637 }
02479099
PZ
1638}
1639
1640static void set_next_buddy(struct sched_entity *se)
1641{
6bc912b7
PZ
1642 if (likely(task_of(se)->policy != SCHED_IDLE)) {
1643 for_each_sched_entity(se)
1644 cfs_rq_of(se)->next = se;
1645 }
02479099
PZ
1646}
1647
bf0f6f24
IM
1648/*
1649 * Preempt the current task with a newly woken task if needed:
1650 */
5a9b86f6 1651static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
1652{
1653 struct task_struct *curr = rq->curr;
8651a86c 1654 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 1655 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 1656 int scale = cfs_rq->nr_running >= sched_nr_latency;
bf0f6f24 1657
3a7e73a2
PZ
1658 if (unlikely(rt_prio(p->prio)))
1659 goto preempt;
aa2ac252 1660
d95f98d0
PZ
1661 if (unlikely(p->sched_class != &fair_sched_class))
1662 return;
1663
4ae7d5ce
IM
1664 if (unlikely(se == pse))
1665 return;
1666
f685ceac 1667 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
3cb63d52 1668 set_next_buddy(pse);
57fdc26d 1669
aec0a514
BR
1670 /*
1671 * We can come here with TIF_NEED_RESCHED already set from new task
1672 * wake up path.
1673 */
1674 if (test_tsk_need_resched(curr))
1675 return;
1676
91c234b4 1677 /*
6bc912b7 1678 * Batch and idle tasks do not preempt (their preemption is driven by
91c234b4
IM
1679 * the tick):
1680 */
6bc912b7 1681 if (unlikely(p->policy != SCHED_NORMAL))
91c234b4 1682 return;
bf0f6f24 1683
6bc912b7 1684 /* Idle tasks are by definition preempted by everybody. */
3a7e73a2
PZ
1685 if (unlikely(curr->policy == SCHED_IDLE))
1686 goto preempt;
bf0f6f24 1687
ad4b78bb
PZ
1688 if (!sched_feat(WAKEUP_PREEMPT))
1689 return;
1690
3a7e73a2 1691 update_curr(cfs_rq);
464b7527 1692 find_matching_se(&se, &pse);
002f128b 1693 BUG_ON(!pse);
3a7e73a2
PZ
1694 if (wakeup_preempt_entity(se, pse) == 1)
1695 goto preempt;
464b7527 1696
3a7e73a2 1697 return;
a65ac745 1698
3a7e73a2
PZ
1699preempt:
1700 resched_task(curr);
1701 /*
1702 * Only set the backward buddy when the current task is still
1703 * on the rq. This can happen when a wakeup gets interleaved
1704 * with schedule on the ->pre_schedule() or idle_balance()
1705 * point, either of which can * drop the rq lock.
1706 *
1707 * Also, during early boot the idle thread is in the fair class,
1708 * for obvious reasons its a bad idea to schedule back to it.
1709 */
1710 if (unlikely(!se->on_rq || curr == rq->idle))
1711 return;
1712
1713 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1714 set_last_buddy(se);
bf0f6f24
IM
1715}
1716
fb8d4724 1717static struct task_struct *pick_next_task_fair(struct rq *rq)
bf0f6f24 1718{
8f4d37ec 1719 struct task_struct *p;
bf0f6f24
IM
1720 struct cfs_rq *cfs_rq = &rq->cfs;
1721 struct sched_entity *se;
1722
36ace27e 1723 if (!cfs_rq->nr_running)
bf0f6f24
IM
1724 return NULL;
1725
1726 do {
9948f4b2 1727 se = pick_next_entity(cfs_rq);
f4b6755f 1728 set_next_entity(cfs_rq, se);
bf0f6f24
IM
1729 cfs_rq = group_cfs_rq(se);
1730 } while (cfs_rq);
1731
8f4d37ec
PZ
1732 p = task_of(se);
1733 hrtick_start_fair(rq, p);
1734
1735 return p;
bf0f6f24
IM
1736}
1737
1738/*
1739 * Account for a descheduled task:
1740 */
31ee529c 1741static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
1742{
1743 struct sched_entity *se = &prev->se;
1744 struct cfs_rq *cfs_rq;
1745
1746 for_each_sched_entity(se) {
1747 cfs_rq = cfs_rq_of(se);
ab6cde26 1748 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
1749 }
1750}
1751
681f3e68 1752#ifdef CONFIG_SMP
bf0f6f24
IM
1753/**************************************************
1754 * Fair scheduling class load-balancing methods:
1755 */
1756
1e3c88bd
PZ
1757/*
1758 * pull_task - move a task from a remote runqueue to the local runqueue.
1759 * Both runqueues must be locked.
1760 */
1761static void pull_task(struct rq *src_rq, struct task_struct *p,
1762 struct rq *this_rq, int this_cpu)
1763{
1764 deactivate_task(src_rq, p, 0);
1765 set_task_cpu(p, this_cpu);
1766 activate_task(this_rq, p, 0);
1767 check_preempt_curr(this_rq, p, 0);
1768}
1769
1770/*
1771 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
1772 */
1773static
1774int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
1775 struct sched_domain *sd, enum cpu_idle_type idle,
1776 int *all_pinned)
1777{
1778 int tsk_cache_hot = 0;
1779 /*
1780 * We do not migrate tasks that are:
1781 * 1) running (obviously), or
1782 * 2) cannot be migrated to this CPU due to cpus_allowed, or
1783 * 3) are cache-hot on their current CPU.
1784 */
1785 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
41acab88 1786 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
1e3c88bd
PZ
1787 return 0;
1788 }
1789 *all_pinned = 0;
1790
1791 if (task_running(rq, p)) {
41acab88 1792 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
1793 return 0;
1794 }
1795
1796 /*
1797 * Aggressive migration if:
1798 * 1) task is cache cold, or
1799 * 2) too many balance attempts have failed.
1800 */
1801
1802 tsk_cache_hot = task_hot(p, rq->clock, sd);
1803 if (!tsk_cache_hot ||
1804 sd->nr_balance_failed > sd->cache_nice_tries) {
1805#ifdef CONFIG_SCHEDSTATS
1806 if (tsk_cache_hot) {
1807 schedstat_inc(sd, lb_hot_gained[idle]);
41acab88 1808 schedstat_inc(p, se.statistics.nr_forced_migrations);
1e3c88bd
PZ
1809 }
1810#endif
1811 return 1;
1812 }
1813
1814 if (tsk_cache_hot) {
41acab88 1815 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
1e3c88bd
PZ
1816 return 0;
1817 }
1818 return 1;
1819}
1820
897c395f
PZ
1821/*
1822 * move_one_task tries to move exactly one task from busiest to this_rq, as
1823 * part of active balancing operations within "domain".
1824 * Returns 1 if successful and 0 otherwise.
1825 *
1826 * Called with both runqueues locked.
1827 */
1828static int
1829move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1830 struct sched_domain *sd, enum cpu_idle_type idle)
1831{
1832 struct task_struct *p, *n;
1833 struct cfs_rq *cfs_rq;
1834 int pinned = 0;
1835
1836 for_each_leaf_cfs_rq(busiest, cfs_rq) {
1837 list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
1838
1839 if (!can_migrate_task(p, busiest, this_cpu,
1840 sd, idle, &pinned))
1841 continue;
1842
1843 pull_task(busiest, p, this_rq, this_cpu);
1844 /*
1845 * Right now, this is only the second place pull_task()
1846 * is called, so we can safely collect pull_task()
1847 * stats here rather than inside pull_task().
1848 */
1849 schedstat_inc(sd, lb_gained[idle]);
1850 return 1;
1851 }
1852 }
1853
1854 return 0;
1855}
1856
1e3c88bd
PZ
1857static unsigned long
1858balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1859 unsigned long max_load_move, struct sched_domain *sd,
1860 enum cpu_idle_type idle, int *all_pinned,
ee00e66f 1861 int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
1e3c88bd
PZ
1862{
1863 int loops = 0, pulled = 0, pinned = 0;
1e3c88bd 1864 long rem_load_move = max_load_move;
ee00e66f 1865 struct task_struct *p, *n;
1e3c88bd
PZ
1866
1867 if (max_load_move == 0)
1868 goto out;
1869
1870 pinned = 1;
1871
ee00e66f
PZ
1872 list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
1873 if (loops++ > sysctl_sched_nr_migrate)
1874 break;
1e3c88bd 1875
ee00e66f
PZ
1876 if ((p->se.load.weight >> 1) > rem_load_move ||
1877 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
1878 continue;
1e3c88bd 1879
ee00e66f
PZ
1880 pull_task(busiest, p, this_rq, this_cpu);
1881 pulled++;
1882 rem_load_move -= p->se.load.weight;
1e3c88bd
PZ
1883
1884#ifdef CONFIG_PREEMPT
ee00e66f
PZ
1885 /*
1886 * NEWIDLE balancing is a source of latency, so preemptible
1887 * kernels will stop after the first task is pulled to minimize
1888 * the critical section.
1889 */
1890 if (idle == CPU_NEWLY_IDLE)
1891 break;
1e3c88bd
PZ
1892#endif
1893
ee00e66f
PZ
1894 /*
1895 * We only want to steal up to the prescribed amount of
1896 * weighted load.
1897 */
1898 if (rem_load_move <= 0)
1899 break;
1900
1e3c88bd
PZ
1901 if (p->prio < *this_best_prio)
1902 *this_best_prio = p->prio;
1e3c88bd
PZ
1903 }
1904out:
1905 /*
1906 * Right now, this is one of only two places pull_task() is called,
1907 * so we can safely collect pull_task() stats here rather than
1908 * inside pull_task().
1909 */
1910 schedstat_add(sd, lb_gained[idle], pulled);
1911
1912 if (all_pinned)
1913 *all_pinned = pinned;
1914
1915 return max_load_move - rem_load_move;
1916}
1917
230059de
PZ
1918#ifdef CONFIG_FAIR_GROUP_SCHED
1919static unsigned long
1920load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1921 unsigned long max_load_move,
1922 struct sched_domain *sd, enum cpu_idle_type idle,
1923 int *all_pinned, int *this_best_prio)
1924{
1925 long rem_load_move = max_load_move;
1926 int busiest_cpu = cpu_of(busiest);
1927 struct task_group *tg;
1928
1929 rcu_read_lock();
1930 update_h_load(busiest_cpu);
1931
1932 list_for_each_entry_rcu(tg, &task_groups, list) {
1933 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1934 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1935 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1936 u64 rem_load, moved_load;
1937
1938 /*
1939 * empty group
1940 */
1941 if (!busiest_cfs_rq->task_weight)
1942 continue;
1943
1944 rem_load = (u64)rem_load_move * busiest_weight;
1945 rem_load = div_u64(rem_load, busiest_h_load + 1);
1946
1947 moved_load = balance_tasks(this_rq, this_cpu, busiest,
1948 rem_load, sd, idle, all_pinned, this_best_prio,
1949 busiest_cfs_rq);
1950
1951 if (!moved_load)
1952 continue;
1953
1954 moved_load *= busiest_h_load;
1955 moved_load = div_u64(moved_load, busiest_weight + 1);
1956
1957 rem_load_move -= moved_load;
1958 if (rem_load_move < 0)
1959 break;
1960 }
1961 rcu_read_unlock();
1962
1963 return max_load_move - rem_load_move;
1964}
1965#else
1966static unsigned long
1967load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1968 unsigned long max_load_move,
1969 struct sched_domain *sd, enum cpu_idle_type idle,
1970 int *all_pinned, int *this_best_prio)
1971{
1972 return balance_tasks(this_rq, this_cpu, busiest,
1973 max_load_move, sd, idle, all_pinned,
1974 this_best_prio, &busiest->cfs);
1975}
1976#endif
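
Illustrative sketch (not part of sched_fair.c): the group-scheduling path above scales the global load quota down to each cfs_rq's share via its hierarchical load, then scales the amount actually moved back up. A minimal standalone rendering of that arithmetic, with made-up weights:

/* Standalone example only; values are assumptions, not kernel defaults. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t rem_load_move  = 2048;  /* weighted load still to move */
        uint64_t busiest_weight = 512;   /* busiest_cfs_rq->load.weight */
        uint64_t busiest_h_load = 1024;  /* hierarchical load of that cfs_rq */

        /* Scale the global quota down to this group's share. */
        uint64_t rem_load = rem_load_move * busiest_weight / (busiest_h_load + 1);

        /* Pretend balance_tasks() managed to move half of that. */
        uint64_t moved_load = rem_load / 2;

        /* Scale the moved amount back up to the global metric. */
        moved_load = moved_load * busiest_h_load / (busiest_weight + 1);

        printf("per-group quota %llu, counts as %llu globally\n",
               (unsigned long long)rem_load, (unsigned long long)moved_load);
        return 0;
}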
1977
1e3c88bd
PZ
1978/*
1979 * move_tasks tries to move up to max_load_move weighted load from busiest to
1980 * this_rq, as part of a balancing operation within domain "sd".
1981 * Returns 1 if successful and 0 otherwise.
1982 *
1983 * Called with both runqueues locked.
1984 */
1985static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
1986 unsigned long max_load_move,
1987 struct sched_domain *sd, enum cpu_idle_type idle,
1988 int *all_pinned)
1989{
3d45fd80 1990 unsigned long total_load_moved = 0, load_moved;
1e3c88bd
PZ
1991 int this_best_prio = this_rq->curr->prio;
1992
1993 do {
3d45fd80 1994 load_moved = load_balance_fair(this_rq, this_cpu, busiest,
1e3c88bd
PZ
1995 max_load_move - total_load_moved,
1996 sd, idle, all_pinned, &this_best_prio);
3d45fd80
PZ
1997
1998 total_load_moved += load_moved;
1e3c88bd
PZ
1999
2000#ifdef CONFIG_PREEMPT
2001 /*
2002 * NEWIDLE balancing is a source of latency, so preemptible
2003 * kernels will stop after the first task is pulled to minimize
2004 * the critical section.
2005 */
2006 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
2007 break;
baa8c110
PZ
2008
2009 if (raw_spin_is_contended(&this_rq->lock) ||
2010 raw_spin_is_contended(&busiest->lock))
2011 break;
1e3c88bd 2012#endif
3d45fd80 2013 } while (load_moved && max_load_move > total_load_moved);
1e3c88bd
PZ
2014
2015 return total_load_moved > 0;
2016}
2017
1e3c88bd
PZ
2018/********** Helpers for find_busiest_group ************************/
2019/*
2020 * sd_lb_stats - Structure to store the statistics of a sched_domain
2021 * during load balancing.
2022 */
2023struct sd_lb_stats {
2024 struct sched_group *busiest; /* Busiest group in this sd */
2025 struct sched_group *this; /* Local group in this sd */
2026 unsigned long total_load; /* Total load of all groups in sd */
2027 unsigned long total_pwr; /* Total power of all groups in sd */
2028 unsigned long avg_load; /* Average load across all groups in sd */
2029
2030 /** Statistics of this group */
2031 unsigned long this_load;
2032 unsigned long this_load_per_task;
2033 unsigned long this_nr_running;
2034
2035 /* Statistics of the busiest group */
2036 unsigned long max_load;
2037 unsigned long busiest_load_per_task;
2038 unsigned long busiest_nr_running;
dd5feea1 2039 unsigned long busiest_group_capacity;
1e3c88bd
PZ
2040
2041 int group_imb; /* Is there an imbalance in this sd? */
2042#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2043 int power_savings_balance; /* Is powersave balance needed for this sd */
2044 struct sched_group *group_min; /* Least loaded group in sd */
2045 struct sched_group *group_leader; /* Group which relieves group_min */
2046 unsigned long min_load_per_task; /* load_per_task in group_min */
2047 unsigned long leader_nr_running; /* Nr running of group_leader */
2048 unsigned long min_nr_running; /* Nr running of group_min */
2049#endif
2050};
2051
2052/*
2053 * sg_lb_stats - stats of a sched_group required for load_balancing
2054 */
2055struct sg_lb_stats {
2056 unsigned long avg_load; /*Avg load across the CPUs of the group */
2057 unsigned long group_load; /* Total load over the CPUs of the group */
2058 unsigned long sum_nr_running; /* Nr tasks running in the group */
2059 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
2060 unsigned long group_capacity;
2061 int group_imb; /* Is there an imbalance in the group? */
2062};
2063
2064/**
2065 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
2066 * @group: The group whose first cpu is to be returned.
2067 */
2068static inline unsigned int group_first_cpu(struct sched_group *group)
2069{
2070 return cpumask_first(sched_group_cpus(group));
2071}
2072
2073/**
2074 * get_sd_load_idx - Obtain the load index for a given sched domain.
2075 * @sd: The sched_domain whose load_idx is to be obtained.
2076 * @idle: The idle status of the CPU whose sd load_idx is obtained.
2077 */
2078static inline int get_sd_load_idx(struct sched_domain *sd,
2079 enum cpu_idle_type idle)
2080{
2081 int load_idx;
2082
2083 switch (idle) {
2084 case CPU_NOT_IDLE:
2085 load_idx = sd->busy_idx;
2086 break;
2087
2088 case CPU_NEWLY_IDLE:
2089 load_idx = sd->newidle_idx;
2090 break;
2091 default:
2092 load_idx = sd->idle_idx;
2093 break;
2094 }
2095
2096 return load_idx;
2097}
2098
2099
2100#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2101/**
2102 * init_sd_power_savings_stats - Initialize power savings statistics for
2103 * the given sched_domain, during load balancing.
2104 *
2105 * @sd: Sched domain whose power-savings statistics are to be initialized.
2106 * @sds: Variable containing the statistics for sd.
2107 * @idle: Idle status of the CPU at which we're performing load-balancing.
2108 */
2109static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2110 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2111{
2112 /*
2113 * Busy processors will not participate in power savings
2114 * balance.
2115 */
2116 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2117 sds->power_savings_balance = 0;
2118 else {
2119 sds->power_savings_balance = 1;
2120 sds->min_nr_running = ULONG_MAX;
2121 sds->leader_nr_running = 0;
2122 }
2123}
2124
2125/**
2126 * update_sd_power_savings_stats - Update the power saving stats for a
2127 * sched_domain while performing load balancing.
2128 *
2129 * @group: sched_group belonging to the sched_domain under consideration.
2130 * @sds: Variable containing the statistics of the sched_domain
2131 * @local_group: Does group contain the CPU for which we're performing
2132 * load balancing?
2133 * @sgs: Variable containing the statistics of the group.
2134 */
2135static inline void update_sd_power_savings_stats(struct sched_group *group,
2136 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2137{
2138
2139 if (!sds->power_savings_balance)
2140 return;
2141
2142 /*
2143 * If the local group is idle or completely loaded, there is
2144 * no need to do power savings balance at this domain.
2145 */
2146 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2147 !sds->this_nr_running))
2148 sds->power_savings_balance = 0;
2149
2150 /*
2151 * If a group is already running at full capacity or idle,
2152 * don't include that group in power savings calculations
2153 */
2154 if (!sds->power_savings_balance ||
2155 sgs->sum_nr_running >= sgs->group_capacity ||
2156 !sgs->sum_nr_running)
2157 return;
2158
2159 /*
2160 * Calculate the group which has the least non-idle load.
2161 * This is the group from which we will pick up load in order
2162 * to save power.
2163 */
2164 if ((sgs->sum_nr_running < sds->min_nr_running) ||
2165 (sgs->sum_nr_running == sds->min_nr_running &&
2166 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2167 sds->group_min = group;
2168 sds->min_nr_running = sgs->sum_nr_running;
2169 sds->min_load_per_task = sgs->sum_weighted_load /
2170 sgs->sum_nr_running;
2171 }
2172
2173 /*
2174 * Calculate the group which is close to its capacity
2175 * but still has some room to pick up load from another
2176 * group and save more power.
2177 */
2178 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
2179 return;
2180
2181 if (sgs->sum_nr_running > sds->leader_nr_running ||
2182 (sgs->sum_nr_running == sds->leader_nr_running &&
2183 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2184 sds->group_leader = group;
2185 sds->leader_nr_running = sgs->sum_nr_running;
2186 }
2187}
2188
2189/**
2190 * check_power_save_busiest_group - see if there is potential for some power-savings balance
2191 * @sds: Variable containing the statistics of the sched_domain
2192 * under consideration.
2193 * @this_cpu: Cpu at which we're currently performing load-balancing.
2194 * @imbalance: Variable to store the imbalance.
2195 *
2196 * Description:
2197 * Check if we have potential to perform some power-savings balance.
2198 * If yes, set the busiest group to be the least loaded group in the
2199 * sched_domain, so that its CPUs can be put to idle.
2200 *
2201 * Returns 1 if there is potential to perform power-savings balance.
2202 * Else returns 0.
2203 */
2204static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2205 int this_cpu, unsigned long *imbalance)
2206{
2207 if (!sds->power_savings_balance)
2208 return 0;
2209
2210 if (sds->this != sds->group_leader ||
2211 sds->group_leader == sds->group_min)
2212 return 0;
2213
2214 *imbalance = sds->min_load_per_task;
2215 sds->busiest = sds->group_min;
2216
2217 return 1;
2218
2219}
2220#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2221static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2222 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2223{
2224 return;
2225}
2226
2227static inline void update_sd_power_savings_stats(struct sched_group *group,
2228 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2229{
2230 return;
2231}
2232
2233static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2234 int this_cpu, unsigned long *imbalance)
2235{
2236 return 0;
2237}
2238#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2239
2240
2241unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
2242{
2243 return SCHED_LOAD_SCALE;
2244}
2245
2246unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2247{
2248 return default_scale_freq_power(sd, cpu);
2249}
2250
2251unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2252{
669c55e9 2253 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
2254 unsigned long smt_gain = sd->smt_gain;
2255
2256 smt_gain /= weight;
2257
2258 return smt_gain;
2259}
2260
2261unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
2262{
2263 return default_scale_smt_power(sd, cpu);
2264}
2265
2266unsigned long scale_rt_power(int cpu)
2267{
2268 struct rq *rq = cpu_rq(cpu);
2269 u64 total, available;
2270
2271 sched_avg_update(rq);
2272
2273 total = sched_avg_period() + (rq->clock - rq->age_stamp);
2274 available = total - rq->rt_avg;
2275
2276 if (unlikely((s64)total < SCHED_LOAD_SCALE))
2277 total = SCHED_LOAD_SCALE;
2278
2279 total >>= SCHED_LOAD_SHIFT;
2280
2281 return div_u64(available, total);
2282}
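
Illustrative sketch (not part of the kernel source): scale_rt_power() effectively reports the fraction of the averaging window not consumed by RT/irq time, in SCHED_LOAD_SCALE units. A standalone approximation with assumed numbers:

/* Standalone example only; period and rt_avg values are assumptions. */
#include <stdio.h>
#include <stdint.h>

#define SCHED_LOAD_SCALE 1024ULL
#define SCHED_LOAD_SHIFT 10

int main(void)
{
        uint64_t total  = 1000000000ULL;  /* ~1s averaging window, in ns */
        uint64_t rt_avg =  250000000ULL;  /* 25% spent in RT/irq work */
        uint64_t available = total - rt_avg;

        total >>= SCHED_LOAD_SHIFT;       /* same fixed-point trick as above */
        uint64_t scale = available / total;   /* div_u64() in the kernel */

        /* Roughly 768 of 1024: this CPU is worth ~75% of a full CPU. */
        printf("scale_rt_power ~= %llu / %llu\n",
               (unsigned long long)scale, (unsigned long long)SCHED_LOAD_SCALE);
        return 0;
}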
2283
2284static void update_cpu_power(struct sched_domain *sd, int cpu)
2285{
669c55e9 2286 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
2287 unsigned long power = SCHED_LOAD_SCALE;
2288 struct sched_group *sdg = sd->groups;
2289
1e3c88bd
PZ
2290 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
2291 if (sched_feat(ARCH_POWER))
2292 power *= arch_scale_smt_power(sd, cpu);
2293 else
2294 power *= default_scale_smt_power(sd, cpu);
2295
2296 power >>= SCHED_LOAD_SHIFT;
2297 }
2298
9d5efe05
SV
2299 sdg->cpu_power_orig = power;
2300
2301 if (sched_feat(ARCH_POWER))
2302 power *= arch_scale_freq_power(sd, cpu);
2303 else
2304 power *= default_scale_freq_power(sd, cpu);
2305
2306 power >>= SCHED_LOAD_SHIFT;
2307
1e3c88bd
PZ
2308 power *= scale_rt_power(cpu);
2309 power >>= SCHED_LOAD_SHIFT;
2310
2311 if (!power)
2312 power = 1;
2313
e51fd5e2 2314 cpu_rq(cpu)->cpu_power = power;
1e3c88bd
PZ
2315 sdg->cpu_power = power;
2316}
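
Illustrative sketch: update_cpu_power() above applies the SMT, frequency and RT scalings as successive fixed-point multiplications. A standalone rendering of that chain with assumed factors (1024 meaning full power):

/* Standalone example only; the three factors are assumptions. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL
#define SCHED_LOAD_SHIFT 10

int main(void)
{
        unsigned long power = SCHED_LOAD_SCALE;
        unsigned long smt_factor  = 589;   /* assumed: smt_gain split across 2 threads */
        unsigned long freq_factor = 1024;  /* running at full frequency */
        unsigned long rt_factor   = 768;   /* 25% stolen by RT/irq time */

        power = (power * smt_factor)  >> SCHED_LOAD_SHIFT;
        power = (power * freq_factor) >> SCHED_LOAD_SHIFT;
        power = (power * rt_factor)   >> SCHED_LOAD_SHIFT;
        if (!power)
                power = 1;

        printf("effective cpu_power = %lu (of %lu)\n", power, SCHED_LOAD_SCALE);
        return 0;
}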
2317
2318static void update_group_power(struct sched_domain *sd, int cpu)
2319{
2320 struct sched_domain *child = sd->child;
2321 struct sched_group *group, *sdg = sd->groups;
2322 unsigned long power;
2323
2324 if (!child) {
2325 update_cpu_power(sd, cpu);
2326 return;
2327 }
2328
2329 power = 0;
2330
2331 group = child->groups;
2332 do {
2333 power += group->cpu_power;
2334 group = group->next;
2335 } while (group != child->groups);
2336
2337 sdg->cpu_power = power;
2338}
2339
9d5efe05
SV
2340/*
2341 * Try and fix up capacity for tiny siblings; this is needed when
2342 * things like SD_ASYM_PACKING need f_b_g to select another sibling
2343 * which on its own isn't powerful enough.
2344 *
2345 * See update_sd_pick_busiest() and check_asym_packing().
2346 */
2347static inline int
2348fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
2349{
2350 /*
2351 * Only siblings can have significantly less than SCHED_LOAD_SCALE
2352 */
2353 if (sd->level != SD_LV_SIBLING)
2354 return 0;
2355
2356 /*
2357 * If ~90% of the cpu_power is still there, we're good.
2358 */
694f5a11 2359 if (group->cpu_power * 32 > group->cpu_power_orig * 29)
9d5efe05
SV
2360 return 1;
2361
2362 return 0;
2363}
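
Illustrative sketch: the "~90%" test above is the integer comparison cpu_power * 32 > cpu_power_orig * 29, i.e. the group must retain more than 29/32 (about 90.6%) of its original power. A standalone check with assumed values:

/* Standalone example only; the power values are assumptions. */
#include <stdio.h>

static int has_enough_power(unsigned long power, unsigned long power_orig)
{
        return power * 32 > power_orig * 29;
}

int main(void)
{
        /* 595/640 ~ 93%: still counts as one unit of capacity. */
        printf("%d\n", has_enough_power(595, 640));
        /* 560/640 ~ 87.5%: does not. */
        printf("%d\n", has_enough_power(560, 640));
        return 0;
}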
2364
1e3c88bd
PZ
2365/**
2366 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
2367 * @sd: The sched_domain whose statistics are to be updated.
2368 * @group: sched_group whose statistics are to be updated.
2369 * @this_cpu: Cpu for which load balance is currently performed.
2370 * @idle: Idle status of this_cpu
2371 * @load_idx: Load index of sched_domain of this_cpu for load calc.
2372 * @sd_idle: Idle status of the sched_domain containing group.
2373 * @local_group: Does group contain this_cpu.
2374 * @cpus: Set of cpus considered for load balancing.
2375 * @balance: Should we balance.
2376 * @sgs: variable to hold the statistics for this group.
2377 */
2378static inline void update_sg_lb_stats(struct sched_domain *sd,
2379 struct sched_group *group, int this_cpu,
2380 enum cpu_idle_type idle, int load_idx, int *sd_idle,
2381 int local_group, const struct cpumask *cpus,
2382 int *balance, struct sg_lb_stats *sgs)
2383{
2384 unsigned long load, max_cpu_load, min_cpu_load;
2385 int i;
2386 unsigned int balance_cpu = -1, first_idle_cpu = 0;
dd5feea1 2387 unsigned long avg_load_per_task = 0;
1e3c88bd 2388
871e35bc 2389 if (local_group)
1e3c88bd 2390 balance_cpu = group_first_cpu(group);
1e3c88bd
PZ
2391
2392 /* Tally up the load of all CPUs in the group */
1e3c88bd
PZ
2393 max_cpu_load = 0;
2394 min_cpu_load = ~0UL;
2395
2396 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
2397 struct rq *rq = cpu_rq(i);
2398
2399 if (*sd_idle && rq->nr_running)
2400 *sd_idle = 0;
2401
2402 /* Bias balancing toward cpus of our domain */
2403 if (local_group) {
2404 if (idle_cpu(i) && !first_idle_cpu) {
2405 first_idle_cpu = 1;
2406 balance_cpu = i;
2407 }
2408
2409 load = target_load(i, load_idx);
2410 } else {
2411 load = source_load(i, load_idx);
2412 if (load > max_cpu_load)
2413 max_cpu_load = load;
2414 if (min_cpu_load > load)
2415 min_cpu_load = load;
2416 }
2417
2418 sgs->group_load += load;
2419 sgs->sum_nr_running += rq->nr_running;
2420 sgs->sum_weighted_load += weighted_cpuload(i);
2421
1e3c88bd
PZ
2422 }
2423
2424 /*
2425 * The first idle cpu, or the first (busiest) cpu in this sched group,
2426 * is eligible for doing load balancing at this and higher
2427 * domains. In the newly idle case, we allow all the cpus
2428 * to do the newly idle load balance.
2429 */
bbc8cb5b
PZ
2430 if (idle != CPU_NEWLY_IDLE && local_group) {
2431 if (balance_cpu != this_cpu) {
2432 *balance = 0;
2433 return;
2434 }
2435 update_group_power(sd, this_cpu);
1e3c88bd
PZ
2436 }
2437
2438 /* Adjust by relative CPU power of the group */
2439 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
2440
1e3c88bd
PZ
2441 /*
2442 * Consider the group unbalanced when the imbalance is larger
2443 * than the average weight of two tasks.
2444 *
2445 * APZ: with cgroup the avg task weight can vary wildly and
2446 * might not be a suitable number - should we keep a
2447 * normalized nr_running number somewhere that negates
2448 * the hierarchy?
2449 */
dd5feea1
SS
2450 if (sgs->sum_nr_running)
2451 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd
PZ
2452
2453 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
2454 sgs->group_imb = 1;
2455
2456 sgs->group_capacity =
2457 DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
9d5efe05
SV
2458 if (!sgs->group_capacity)
2459 sgs->group_capacity = fix_small_capacity(sd, group);
1e3c88bd
PZ
2460}
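
Illustrative sketch: the per-group statistics above end with two normalizations, an avg_load scaled by the group's cpu_power and an integer capacity in whole-CPU units. A standalone example with assumed numbers:

/* Standalone example only; DIV_ROUND_CLOSEST is a local stand-in. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        unsigned long group_load = 3072;  /* sum of the member CPUs' loads */
        unsigned long cpu_power  = 2048;  /* two full-power CPUs */

        unsigned long avg_load = group_load * SCHED_LOAD_SCALE / cpu_power;
        unsigned long capacity = DIV_ROUND_CLOSEST(cpu_power, SCHED_LOAD_SCALE);

        /* avg_load 1536: the group looks 1.5 "full CPUs" deep per unit of power. */
        printf("avg_load=%lu capacity=%lu\n", avg_load, capacity);
        return 0;
}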
2461
532cb4c4
MN
2462/**
2463 * update_sd_pick_busiest - return 1 on busiest group
2464 * @sd: sched_domain whose statistics are to be checked
2465 * @sds: sched_domain statistics
2466 * @sg: sched_group candidate to be checked for being the busiest
b6b12294
MN
2467 * @sgs: sched_group statistics
2468 * @this_cpu: the current cpu
532cb4c4
MN
2469 *
2470 * Determine if @sg is a busier group than the previously selected
2471 * busiest group.
2472 */
2473static bool update_sd_pick_busiest(struct sched_domain *sd,
2474 struct sd_lb_stats *sds,
2475 struct sched_group *sg,
2476 struct sg_lb_stats *sgs,
2477 int this_cpu)
2478{
2479 if (sgs->avg_load <= sds->max_load)
2480 return false;
2481
2482 if (sgs->sum_nr_running > sgs->group_capacity)
2483 return true;
2484
2485 if (sgs->group_imb)
2486 return true;
2487
2488 /*
2489 * ASYM_PACKING needs to move all the work to the lowest
2490 * numbered CPUs in the group; therefore mark all groups
2491 * higher than ourselves as busy.
2492 */
2493 if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
2494 this_cpu < group_first_cpu(sg)) {
2495 if (!sds->busiest)
2496 return true;
2497
2498 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
2499 return true;
2500 }
2501
2502 return false;
2503}
2504
1e3c88bd
PZ
2505/**
2506 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
2507 * @sd: sched_domain whose statistics are to be updated.
2508 * @this_cpu: Cpu for which load balance is currently performed.
2509 * @idle: Idle status of this_cpu
532cb4c4 2510 * @sd_idle: Idle status of the sched_domain containing sg.
1e3c88bd
PZ
2511 * @cpus: Set of cpus considered for load balancing.
2512 * @balance: Should we balance.
2513 * @sds: variable to hold the statistics for this sched_domain.
2514 */
2515static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
2516 enum cpu_idle_type idle, int *sd_idle,
2517 const struct cpumask *cpus, int *balance,
2518 struct sd_lb_stats *sds)
2519{
2520 struct sched_domain *child = sd->child;
532cb4c4 2521 struct sched_group *sg = sd->groups;
1e3c88bd
PZ
2522 struct sg_lb_stats sgs;
2523 int load_idx, prefer_sibling = 0;
2524
2525 if (child && child->flags & SD_PREFER_SIBLING)
2526 prefer_sibling = 1;
2527
2528 init_sd_power_savings_stats(sd, sds, idle);
2529 load_idx = get_sd_load_idx(sd, idle);
2530
2531 do {
2532 int local_group;
2533
532cb4c4 2534 local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
1e3c88bd 2535 memset(&sgs, 0, sizeof(sgs));
532cb4c4 2536 update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, sd_idle,
1e3c88bd
PZ
2537 local_group, cpus, balance, &sgs);
2538
8f190fb3 2539 if (local_group && !(*balance))
1e3c88bd
PZ
2540 return;
2541
2542 sds->total_load += sgs.group_load;
532cb4c4 2543 sds->total_pwr += sg->cpu_power;
1e3c88bd
PZ
2544
2545 /*
2546 * In case the child domain prefers that tasks go to siblings
532cb4c4 2547 * first, lower the sg capacity to one so that we'll try
1e3c88bd
PZ
2548 * and move all the excess tasks away.
2549 */
2550 if (prefer_sibling)
2551 sgs.group_capacity = min(sgs.group_capacity, 1UL);
2552
2553 if (local_group) {
2554 sds->this_load = sgs.avg_load;
532cb4c4 2555 sds->this = sg;
1e3c88bd
PZ
2556 sds->this_nr_running = sgs.sum_nr_running;
2557 sds->this_load_per_task = sgs.sum_weighted_load;
532cb4c4 2558 } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
1e3c88bd 2559 sds->max_load = sgs.avg_load;
532cb4c4 2560 sds->busiest = sg;
1e3c88bd 2561 sds->busiest_nr_running = sgs.sum_nr_running;
dd5feea1 2562 sds->busiest_group_capacity = sgs.group_capacity;
1e3c88bd
PZ
2563 sds->busiest_load_per_task = sgs.sum_weighted_load;
2564 sds->group_imb = sgs.group_imb;
2565 }
2566
532cb4c4
MN
2567 update_sd_power_savings_stats(sg, sds, local_group, &sgs);
2568 sg = sg->next;
2569 } while (sg != sd->groups);
2570}
2571
2ec57d44 2572int __weak arch_sd_sibling_asym_packing(void)
532cb4c4
MN
2573{
2574 return 0*SD_ASYM_PACKING;
2575}
2576
2577/**
2578 * check_asym_packing - Check to see if the group is packed into the
2579 * sched domain.
2580 *
2581 * This is primarily intended to be used at the sibling level. Some
2582 * cores like POWER7 prefer to use lower numbered SMT threads. In the
2583 * case of POWER7, it can move to lower SMT modes only when higher
2584 * threads are idle. When in lower SMT modes, the threads will
2585 * perform better since they share less core resources. Hence when we
2586 * have idle threads, we want them to be the higher ones.
2587 *
2588 * This packing function is run on idle threads. It checks to see if
2589 * the busiest CPU in this domain (core in the P7 case) has a higher
2590 * CPU number than the packing function is being run on. Here we are
2591 * assuming a lower CPU number is equivalent to a lower SMT thread
2592 * number.
2593 *
b6b12294
MN
2594 * Returns 1 when packing is required and a task should be moved to
2595 * this CPU. The amount of the imbalance is returned in *imbalance.
2596 *
532cb4c4
MN
2597 * @sd: The sched_domain whose packing is to be checked.
2598 * @sds: Statistics of the sched_domain which is to be packed
2599 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2600 * @imbalance: returns amount of imbalanced due to packing.
532cb4c4
MN
2601 */
2602static int check_asym_packing(struct sched_domain *sd,
2603 struct sd_lb_stats *sds,
2604 int this_cpu, unsigned long *imbalance)
2605{
2606 int busiest_cpu;
2607
2608 if (!(sd->flags & SD_ASYM_PACKING))
2609 return 0;
2610
2611 if (!sds->busiest)
2612 return 0;
2613
2614 busiest_cpu = group_first_cpu(sds->busiest);
2615 if (this_cpu > busiest_cpu)
2616 return 0;
2617
2618 *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
2619 SCHED_LOAD_SCALE);
2620 return 1;
1e3c88bd
PZ
2621}
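
Illustrative sketch: when asymmetric packing applies, the busiest group's scaled load is converted back into task-weight units and handed out as the imbalance. A standalone example with assumed values:

/* Standalone example only; CPU numbers and loads are assumptions. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        int this_cpu = 0, busiest_cpu = 2;  /* the lower-numbered thread is idle */
        unsigned long max_load = 1536, cpu_power = 1024;

        if (this_cpu > busiest_cpu) {
                printf("no packing needed\n");
                return 0;
        }
        unsigned long imbalance =
                DIV_ROUND_CLOSEST(max_load * cpu_power, SCHED_LOAD_SCALE);
        printf("pull ~%lu weighted load onto CPU %d\n", imbalance, this_cpu);
        return 0;
}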
2622
2623/**
2624 * fix_small_imbalance - Calculate the minor imbalance that exists
2625 * amongst the groups of a sched_domain, during
2626 * load balancing.
2627 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
2628 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2629 * @imbalance: Variable to store the imbalance.
2630 */
2631static inline void fix_small_imbalance(struct sd_lb_stats *sds,
2632 int this_cpu, unsigned long *imbalance)
2633{
2634 unsigned long tmp, pwr_now = 0, pwr_move = 0;
2635 unsigned int imbn = 2;
dd5feea1 2636 unsigned long scaled_busy_load_per_task;
1e3c88bd
PZ
2637
2638 if (sds->this_nr_running) {
2639 sds->this_load_per_task /= sds->this_nr_running;
2640 if (sds->busiest_load_per_task >
2641 sds->this_load_per_task)
2642 imbn = 1;
2643 } else
2644 sds->this_load_per_task =
2645 cpu_avg_load_per_task(this_cpu);
2646
dd5feea1
SS
2647 scaled_busy_load_per_task = sds->busiest_load_per_task
2648 * SCHED_LOAD_SCALE;
2649 scaled_busy_load_per_task /= sds->busiest->cpu_power;
2650
2651 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
2652 (scaled_busy_load_per_task * imbn)) {
1e3c88bd
PZ
2653 *imbalance = sds->busiest_load_per_task;
2654 return;
2655 }
2656
2657 /*
2658 * OK, we don't have enough imbalance to justify moving tasks,
2659 * however we may be able to increase total CPU power used by
2660 * moving them.
2661 */
2662
2663 pwr_now += sds->busiest->cpu_power *
2664 min(sds->busiest_load_per_task, sds->max_load);
2665 pwr_now += sds->this->cpu_power *
2666 min(sds->this_load_per_task, sds->this_load);
2667 pwr_now /= SCHED_LOAD_SCALE;
2668
2669 /* Amount of load we'd subtract */
2670 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2671 sds->busiest->cpu_power;
2672 if (sds->max_load > tmp)
2673 pwr_move += sds->busiest->cpu_power *
2674 min(sds->busiest_load_per_task, sds->max_load - tmp);
2675
2676 /* Amount of load we'd add */
2677 if (sds->max_load * sds->busiest->cpu_power <
2678 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
2679 tmp = (sds->max_load * sds->busiest->cpu_power) /
2680 sds->this->cpu_power;
2681 else
2682 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2683 sds->this->cpu_power;
2684 pwr_move += sds->this->cpu_power *
2685 min(sds->this_load_per_task, sds->this_load + tmp);
2686 pwr_move /= SCHED_LOAD_SCALE;
2687
2688 /* Move if we gain throughput */
2689 if (pwr_move > pwr_now)
2690 *imbalance = sds->busiest_load_per_task;
2691}
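
Illustrative, simplified sketch (it collapses the per-task bookkeeping above into a single step): the decision boils down to comparing total useful throughput before and after moving one task; the common division by SCHED_LOAD_SCALE is omitted since it does not change the comparison. Loads and powers are assumptions:

/* Standalone example only; not the exact kernel arithmetic. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long busiest_power = 1024, this_power = 1024;
        unsigned long busiest_load = 1800, this_load = 0, task_load = 900;

        unsigned long pwr_now = busiest_power * min_ul(task_load, busiest_load)
                              + this_power * min_ul(task_load, this_load);

        unsigned long tmp = task_load * SCHED_LOAD_SCALE / busiest_power;
        unsigned long pwr_move =
                busiest_power * min_ul(task_load, busiest_load - tmp)
              + this_power * min_ul(task_load, this_load + tmp);

        printf("move one task? %s\n", pwr_move > pwr_now ? "yes" : "no");
        return 0;
}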
2692
2693/**
2694 * calculate_imbalance - Calculate the amount of imbalance present within the
2695 * groups of a given sched_domain during load balance.
2696 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
2697 * @this_cpu: Cpu for which currently load balance is being performed.
2698 * @imbalance: The variable to store the imbalance.
2699 */
2700static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
2701 unsigned long *imbalance)
2702{
dd5feea1
SS
2703 unsigned long max_pull, load_above_capacity = ~0UL;
2704
2705 sds->busiest_load_per_task /= sds->busiest_nr_running;
2706 if (sds->group_imb) {
2707 sds->busiest_load_per_task =
2708 min(sds->busiest_load_per_task, sds->avg_load);
2709 }
2710
1e3c88bd
PZ
2711 /*
2712 * In the presence of smp nice balancing, certain scenarios can have
2713 * max load less than avg load (as we skip the groups at or below
2714 * their cpu_power while calculating max_load...)
2715 */
2716 if (sds->max_load < sds->avg_load) {
2717 *imbalance = 0;
2718 return fix_small_imbalance(sds, this_cpu, imbalance);
2719 }
2720
dd5feea1
SS
2721 if (!sds->group_imb) {
2722 /*
2723 * Don't want to pull so many tasks that a group would go idle.
2724 */
2725 load_above_capacity = (sds->busiest_nr_running -
2726 sds->busiest_group_capacity);
2727
2728 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
2729
2730 load_above_capacity /= sds->busiest->cpu_power;
2731 }
2732
2733 /*
2734 * We're trying to get all the cpus to the average_load, so we don't
2735 * want to push ourselves above the average load, nor do we wish to
2736 * reduce the max loaded cpu below the average load. At the same time,
2737 * we also don't want to reduce the group load below the group capacity
2738 * (so that we can implement power-savings policies etc). Thus we look
2739 * for the minimum possible imbalance.
2740 * Be careful of negative numbers as they'll appear as very large values
2741 * with unsigned longs.
2742 */
2743 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
2744
2745 /* How much load to actually move to equalise the imbalance */
2746 *imbalance = min(max_pull * sds->busiest->cpu_power,
2747 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
2748 / SCHED_LOAD_SCALE;
2749
2750 /*
2751 * if *imbalance is less than the average load per runnable task,
2752 * there is no guarantee that any tasks will be moved, so we'll have
2753 * a think about bumping its value to force at least one task to be
2754 * moved
2755 */
2756 if (*imbalance < sds->busiest_load_per_task)
2757 return fix_small_imbalance(sds, this_cpu, imbalance);
2758
2759}
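
Illustrative sketch: ignoring the group-capacity cap, the imbalance computed above is the smaller of what the busiest group carries above the average and what the local group can absorb below it, converted back through cpu_power. A standalone example with assumed loads:

/* Standalone example only; all load and power values are assumptions. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned long max_load = 2048, avg_load = 1280, this_load = 512;
        unsigned long busiest_power = 1024, this_power = 1024;
        unsigned long load_above_capacity = ~0UL;  /* capacity cap ignored here */

        unsigned long max_pull = min_ul(max_load - avg_load, load_above_capacity);
        unsigned long imbalance = min_ul(max_pull * busiest_power,
                                         (avg_load - this_load) * this_power)
                                  / SCHED_LOAD_SCALE;

        printf("move ~%lu weighted load\n", imbalance);  /* 768 with these numbers */
        return 0;
}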
2760/******* find_busiest_group() helpers end here *********************/
2761
2762/**
2763 * find_busiest_group - Returns the busiest group within the sched_domain
2764 * if there is an imbalance. If there isn't an imbalance, and
2765 * the user has opted for power-savings, it returns a group whose
2766 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
2767 * such a group exists.
2768 *
2769 * Also calculates the amount of weighted load which should be moved
2770 * to restore balance.
2771 *
2772 * @sd: The sched_domain whose busiest group is to be returned.
2773 * @this_cpu: The cpu for which load balancing is currently being performed.
2774 * @imbalance: Variable which stores amount of weighted load which should
2775 * be moved to restore balance/put a group to idle.
2776 * @idle: The idle status of this_cpu.
2777 * @sd_idle: The idleness of sd
2778 * @cpus: The set of CPUs under consideration for load-balancing.
2779 * @balance: Pointer to a variable indicating if this_cpu
2780 * is the appropriate cpu to perform load balancing at this_level.
2781 *
2782 * Returns: - the busiest group if imbalance exists.
2783 * - If no imbalance and user has opted for power-savings balance,
2784 * return the least loaded group whose CPUs can be
2785 * put to idle by rebalancing its tasks onto our group.
2786 */
2787static struct sched_group *
2788find_busiest_group(struct sched_domain *sd, int this_cpu,
2789 unsigned long *imbalance, enum cpu_idle_type idle,
2790 int *sd_idle, const struct cpumask *cpus, int *balance)
2791{
2792 struct sd_lb_stats sds;
2793
2794 memset(&sds, 0, sizeof(sds));
2795
2796 /*
2797 * Compute the various statistics relevant for load balancing at
2798 * this level.
2799 */
2800 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
2801 balance, &sds);
2802
2803 /* Cases where imbalance does not exist from POV of this_cpu */
2804 /* 1) this_cpu is not the appropriate cpu to perform load balancing
2805 * at this level.
2806 * 2) There is no busy sibling group to pull from.
2807 * 3) This group is the busiest group.
2808 * 4) This group is busier than the average busyness at this
2809 * sched_domain.
2810 * 5) The imbalance is within the specified limit.
1e3c88bd 2811 */
8f190fb3 2812 if (!(*balance))
1e3c88bd
PZ
2813 goto ret;
2814
532cb4c4
MN
2815 if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
2816 check_asym_packing(sd, &sds, this_cpu, imbalance))
2817 return sds.busiest;
2818
1e3c88bd
PZ
2819 if (!sds.busiest || sds.busiest_nr_running == 0)
2820 goto out_balanced;
2821
2822 if (sds.this_load >= sds.max_load)
2823 goto out_balanced;
2824
2825 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
2826
2827 if (sds.this_load >= sds.avg_load)
2828 goto out_balanced;
2829
2830 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
2831 goto out_balanced;
2832
1e3c88bd
PZ
2833 /* Looks like there is an imbalance. Compute it */
2834 calculate_imbalance(&sds, this_cpu, imbalance);
2835 return sds.busiest;
2836
2837out_balanced:
2838 /*
2839 * There is no obvious imbalance. But check if we can do some balancing
2840 * to save power.
2841 */
2842 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
2843 return sds.busiest;
2844ret:
2845 *imbalance = 0;
2846 return NULL;
2847}
2848
2849/*
2850 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2851 */
2852static struct rq *
9d5efe05
SV
2853find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
2854 enum cpu_idle_type idle, unsigned long imbalance,
2855 const struct cpumask *cpus)
1e3c88bd
PZ
2856{
2857 struct rq *busiest = NULL, *rq;
2858 unsigned long max_load = 0;
2859 int i;
2860
2861 for_each_cpu(i, sched_group_cpus(group)) {
2862 unsigned long power = power_of(i);
2863 unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
2864 unsigned long wl;
2865
9d5efe05
SV
2866 if (!capacity)
2867 capacity = fix_small_capacity(sd, group);
2868
1e3c88bd
PZ
2869 if (!cpumask_test_cpu(i, cpus))
2870 continue;
2871
2872 rq = cpu_rq(i);
6e40f5bb 2873 wl = weighted_cpuload(i);
1e3c88bd 2874
6e40f5bb
TG
2875 /*
2876 * When comparing with imbalance, use weighted_cpuload()
2877 * which is not scaled with the cpu power.
2878 */
1e3c88bd
PZ
2879 if (capacity && rq->nr_running == 1 && wl > imbalance)
2880 continue;
2881
6e40f5bb
TG
2882 /*
2883 * For the load comparisons with the other cpus, consider
2884 * the weighted_cpuload() scaled with the cpu power, so that
2885 * the load can be moved away from the cpu that is potentially
2886 * running at a lower capacity.
2887 */
2888 wl = (wl * SCHED_LOAD_SCALE) / power;
2889
1e3c88bd
PZ
2890 if (wl > max_load) {
2891 max_load = wl;
2892 busiest = rq;
2893 }
2894 }
2895
2896 return busiest;
2897}
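
Illustrative sketch: the same raw weighted load looks heavier on a CPU with less cpu_power once scaled, which is why the weaker CPU is preferred as the pull source. Assumed values:

/* Standalone example only; the loads and powers are assumptions. */
#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL

int main(void)
{
        /* Two candidate CPUs with the same raw load but different power. */
        unsigned long wl = 1024;
        unsigned long power_full = 1024, power_weak = 512;

        unsigned long scaled_full = wl * SCHED_LOAD_SCALE / power_full;  /* 1024 */
        unsigned long scaled_weak = wl * SCHED_LOAD_SCALE / power_weak;  /* 2048 */

        /* The weaker CPU looks busier once scaled, so it gets picked. */
        printf("full=%lu weak=%lu -> pull from the weak CPU\n",
               scaled_full, scaled_weak);
        return 0;
}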
2898
2899/*
2900 * Max backoff if we encounter pinned tasks. Pretty arbitrary value; the
2901 * exact number does not matter so long as it is large enough.
2902 */
2903#define MAX_PINNED_INTERVAL 512
2904
2905/* Working cpumask for load_balance and load_balance_newidle. */
2906static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
2907
532cb4c4
MN
2908static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
2909 int busiest_cpu, int this_cpu)
1af3ed3d
PZ
2910{
2911 if (idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
2912
2913 /*
2914 * ASYM_PACKING needs to force migrate tasks from busy but
2915 * higher numbered CPUs in order to pack all tasks in the
2916 * lowest numbered CPUs.
2917 */
2918 if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
2919 return 1;
2920
1af3ed3d
PZ
2921 /*
2922 * The only task running on a non-idle cpu can be moved to this
2923 * cpu in an attempt to completely free up the other CPU
2924 * package.
2925 *
2926 * The package power saving logic comes from
2927 * find_busiest_group(). If there is no imbalance, then
2928 * f_b_g() will return NULL. However, when sched_mc={1,2},
2929 * f_b_g() will select a group from which a running task may be
2930 * pulled to this cpu in order to make the other package idle.
2931 * If there is no opportunity to make a package idle and if
2932 * there is no imbalance, then f_b_g() will return NULL and no
2933 * action will be taken in load_balance_newidle().
2934 *
2935 * Under normal task pull operation due to imbalance, there
2936 * will be more than one task in the source run queue and
2937 * move_tasks() will succeed. ld_moved will be true and this
2938 * active balance code will not be triggered.
2939 */
2940 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2941 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2942 return 0;
2943
2944 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
2945 return 0;
2946 }
2947
2948 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
2949}
2950
969c7921
TH
2951static int active_load_balance_cpu_stop(void *data);
2952
1e3c88bd
PZ
2953/*
2954 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2955 * tasks if there is an imbalance.
2956 */
2957static int load_balance(int this_cpu, struct rq *this_rq,
2958 struct sched_domain *sd, enum cpu_idle_type idle,
2959 int *balance)
2960{
2961 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
2962 struct sched_group *group;
2963 unsigned long imbalance;
2964 struct rq *busiest;
2965 unsigned long flags;
2966 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
2967
2968 cpumask_copy(cpus, cpu_active_mask);
2969
2970 /*
2971 * When power savings policy is enabled for the parent domain, idle
2972 * sibling can pick up load irrespective of busy siblings. In this case,
2973 * let the state of idle sibling percolate up as CPU_IDLE, instead of
2974 * portraying it as CPU_NOT_IDLE.
2975 */
2976 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
2977 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2978 sd_idle = 1;
2979
2980 schedstat_inc(sd, lb_count[idle]);
2981
2982redo:
2983 update_shares(sd);
2984 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
2985 cpus, balance);
2986
2987 if (*balance == 0)
2988 goto out_balanced;
2989
2990 if (!group) {
2991 schedstat_inc(sd, lb_nobusyg[idle]);
2992 goto out_balanced;
2993 }
2994
9d5efe05 2995 busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
1e3c88bd
PZ
2996 if (!busiest) {
2997 schedstat_inc(sd, lb_nobusyq[idle]);
2998 goto out_balanced;
2999 }
3000
3001 BUG_ON(busiest == this_rq);
3002
3003 schedstat_add(sd, lb_imbalance[idle], imbalance);
3004
3005 ld_moved = 0;
3006 if (busiest->nr_running > 1) {
3007 /*
3008 * Attempt to move tasks. If find_busiest_group has found
3009 * an imbalance but busiest->nr_running <= 1, the group is
3010 * still unbalanced. ld_moved simply stays zero, so it is
3011 * correctly treated as an imbalance.
3012 */
3013 local_irq_save(flags);
3014 double_rq_lock(this_rq, busiest);
3015 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3016 imbalance, sd, idle, &all_pinned);
3017 double_rq_unlock(this_rq, busiest);
3018 local_irq_restore(flags);
3019
3020 /*
3021 * some other cpu did the load balance for us.
3022 */
3023 if (ld_moved && this_cpu != smp_processor_id())
3024 resched_cpu(this_cpu);
3025
3026 /* All tasks on this runqueue were pinned by CPU affinity */
3027 if (unlikely(all_pinned)) {
3028 cpumask_clear_cpu(cpu_of(busiest), cpus);
3029 if (!cpumask_empty(cpus))
3030 goto redo;
3031 goto out_balanced;
3032 }
3033 }
3034
3035 if (!ld_moved) {
3036 schedstat_inc(sd, lb_failed[idle]);
3037 sd->nr_balance_failed++;
3038
532cb4c4
MN
3039 if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
3040 this_cpu)) {
1e3c88bd
PZ
3041 raw_spin_lock_irqsave(&busiest->lock, flags);
3042
969c7921
TH
3043 /* don't kick the active_load_balance_cpu_stop,
3044 * if the curr task on busiest cpu can't be
3045 * moved to this_cpu
1e3c88bd
PZ
3046 */
3047 if (!cpumask_test_cpu(this_cpu,
3048 &busiest->curr->cpus_allowed)) {
3049 raw_spin_unlock_irqrestore(&busiest->lock,
3050 flags);
3051 all_pinned = 1;
3052 goto out_one_pinned;
3053 }
3054
969c7921
TH
3055 /*
3056 * ->active_balance synchronizes accesses to
3057 * ->active_balance_work. Once set, it's cleared
3058 * only after active load balance is finished.
3059 */
1e3c88bd
PZ
3060 if (!busiest->active_balance) {
3061 busiest->active_balance = 1;
3062 busiest->push_cpu = this_cpu;
3063 active_balance = 1;
3064 }
3065 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 3066
1e3c88bd 3067 if (active_balance)
969c7921
TH
3068 stop_one_cpu_nowait(cpu_of(busiest),
3069 active_load_balance_cpu_stop, busiest,
3070 &busiest->active_balance_work);
1e3c88bd
PZ
3071
3072 /*
3073 * We've kicked active balancing, reset the failure
3074 * counter.
3075 */
3076 sd->nr_balance_failed = sd->cache_nice_tries+1;
3077 }
3078 } else
3079 sd->nr_balance_failed = 0;
3080
3081 if (likely(!active_balance)) {
3082 /* We were unbalanced, so reset the balancing interval */
3083 sd->balance_interval = sd->min_interval;
3084 } else {
3085 /*
3086 * If we've begun active balancing, start to back off. This
3087 * case may not be covered by the all_pinned logic if there
3088 * is only 1 task on the busy runqueue (because we don't call
3089 * move_tasks).
3090 */
3091 if (sd->balance_interval < sd->max_interval)
3092 sd->balance_interval *= 2;
3093 }
3094
3095 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3096 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3097 ld_moved = -1;
3098
3099 goto out;
3100
3101out_balanced:
3102 schedstat_inc(sd, lb_balanced[idle]);
3103
3104 sd->nr_balance_failed = 0;
3105
3106out_one_pinned:
3107 /* tune up the balancing interval */
3108 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3109 (sd->balance_interval < sd->max_interval))
3110 sd->balance_interval *= 2;
3111
3112 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3113 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3114 ld_moved = -1;
3115 else
3116 ld_moved = 0;
3117out:
3118 if (ld_moved)
3119 update_shares(sd);
3120 return ld_moved;
3121}
3122
1e3c88bd
PZ
3123/*
3124 * idle_balance is called by schedule() if this_cpu is about to become
3125 * idle. Attempts to pull tasks from other CPUs.
3126 */
3127static void idle_balance(int this_cpu, struct rq *this_rq)
3128{
3129 struct sched_domain *sd;
3130 int pulled_task = 0;
3131 unsigned long next_balance = jiffies + HZ;
3132
3133 this_rq->idle_stamp = this_rq->clock;
3134
3135 if (this_rq->avg_idle < sysctl_sched_migration_cost)
3136 return;
3137
f492e12e
PZ
3138 /*
3139 * Drop the rq->lock, but keep IRQ/preempt disabled.
3140 */
3141 raw_spin_unlock(&this_rq->lock);
3142
1e3c88bd
PZ
3143 for_each_domain(this_cpu, sd) {
3144 unsigned long interval;
f492e12e 3145 int balance = 1;
1e3c88bd
PZ
3146
3147 if (!(sd->flags & SD_LOAD_BALANCE))
3148 continue;
3149
f492e12e 3150 if (sd->flags & SD_BALANCE_NEWIDLE) {
1e3c88bd 3151 /* If we've pulled tasks over stop searching: */
f492e12e
PZ
3152 pulled_task = load_balance(this_cpu, this_rq,
3153 sd, CPU_NEWLY_IDLE, &balance);
3154 }
1e3c88bd
PZ
3155
3156 interval = msecs_to_jiffies(sd->balance_interval);
3157 if (time_after(next_balance, sd->last_balance + interval))
3158 next_balance = sd->last_balance + interval;
3159 if (pulled_task) {
3160 this_rq->idle_stamp = 0;
3161 break;
3162 }
3163 }
f492e12e
PZ
3164
3165 raw_spin_lock(&this_rq->lock);
3166
1e3c88bd
PZ
3167 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3168 /*
3169 * We are going idle. next_balance may be set based on
3170 * a busy processor. So reset next_balance.
3171 */
3172 this_rq->next_balance = next_balance;
3173 }
3174}
3175
3176/*
969c7921
TH
3177 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
3178 * running tasks off the busiest CPU onto idle CPUs. It requires at
3179 * least 1 task to be running on each physical CPU where possible, and
3180 * avoids physical / logical imbalances.
1e3c88bd 3181 */
969c7921 3182static int active_load_balance_cpu_stop(void *data)
1e3c88bd 3183{
969c7921
TH
3184 struct rq *busiest_rq = data;
3185 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 3186 int target_cpu = busiest_rq->push_cpu;
969c7921 3187 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 3188 struct sched_domain *sd;
969c7921
TH
3189
3190 raw_spin_lock_irq(&busiest_rq->lock);
3191
3192 /* make sure the requested cpu hasn't gone down in the meantime */
3193 if (unlikely(busiest_cpu != smp_processor_id() ||
3194 !busiest_rq->active_balance))
3195 goto out_unlock;
1e3c88bd
PZ
3196
3197 /* Is there any task to move? */
3198 if (busiest_rq->nr_running <= 1)
969c7921 3199 goto out_unlock;
1e3c88bd
PZ
3200
3201 /*
3202 * This condition is "impossible", if it occurs
3203 * we need to fix it. Originally reported by
3204 * Bjorn Helgaas on a 128-cpu setup.
3205 */
3206 BUG_ON(busiest_rq == target_rq);
3207
3208 /* move a task from busiest_rq to target_rq */
3209 double_lock_balance(busiest_rq, target_rq);
1e3c88bd
PZ
3210
3211 /* Search for an sd spanning us and the target CPU. */
3212 for_each_domain(target_cpu, sd) {
3213 if ((sd->flags & SD_LOAD_BALANCE) &&
3214 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3215 break;
3216 }
3217
3218 if (likely(sd)) {
3219 schedstat_inc(sd, alb_count);
3220
3221 if (move_one_task(target_rq, target_cpu, busiest_rq,
3222 sd, CPU_IDLE))
3223 schedstat_inc(sd, alb_pushed);
3224 else
3225 schedstat_inc(sd, alb_failed);
3226 }
3227 double_unlock_balance(busiest_rq, target_rq);
969c7921
TH
3228out_unlock:
3229 busiest_rq->active_balance = 0;
3230 raw_spin_unlock_irq(&busiest_rq->lock);
3231 return 0;
1e3c88bd
PZ
3232}
3233
3234#ifdef CONFIG_NO_HZ
83cd4fe2
VP
3235
3236static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
3237
3238static void trigger_sched_softirq(void *data)
3239{
3240 raise_softirq_irqoff(SCHED_SOFTIRQ);
3241}
3242
3243static inline void init_sched_softirq_csd(struct call_single_data *csd)
3244{
3245 csd->func = trigger_sched_softirq;
3246 csd->info = NULL;
3247 csd->flags = 0;
3248 csd->priv = 0;
3249}
3250
3251/*
3252 * idle load balancing details
3253 * - One of the idle CPUs nominates itself as idle load_balancer, while
3254 * entering idle.
3255 * - This idle load balancer CPU will also go into tickless mode when
3256 * it is idle, just like all other idle CPUs
3257 * - When one of the busy CPUs notices that there may be an idle rebalancing
3258 * needed, they will kick the idle load balancer, which then does idle
3259 * load balancing for all the idle CPUs.
3260 */
1e3c88bd
PZ
3261static struct {
3262 atomic_t load_balancer;
83cd4fe2
VP
3263 atomic_t first_pick_cpu;
3264 atomic_t second_pick_cpu;
3265 cpumask_var_t idle_cpus_mask;
3266 cpumask_var_t grp_idle_mask;
3267 unsigned long next_balance; /* in jiffy units */
3268} nohz ____cacheline_aligned;
1e3c88bd
PZ
3269
3270int get_nohz_load_balancer(void)
3271{
3272 return atomic_read(&nohz.load_balancer);
3273}
3274
3275#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3276/**
3277 * lowest_flag_domain - Return lowest sched_domain containing flag.
3278 * @cpu: The cpu whose lowest level of sched domain is to
3279 * be returned.
3280 * @flag: The flag to check for the lowest sched_domain
3281 * for the given cpu.
3282 *
3283 * Returns the lowest sched_domain of a cpu which contains the given flag.
3284 */
3285static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
3286{
3287 struct sched_domain *sd;
3288
3289 for_each_domain(cpu, sd)
3290 if (sd && (sd->flags & flag))
3291 break;
3292
3293 return sd;
3294}
3295
3296/**
3297 * for_each_flag_domain - Iterates over sched_domains containing the flag.
3298 * @cpu: The cpu whose domains we're iterating over.
3299 * @sd: variable holding the value of the power_savings_sd
3300 * for cpu.
3301 * @flag: The flag to filter the sched_domains to be iterated.
3302 *
3303 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
3304 * set, starting from the lowest sched_domain to the highest.
3305 */
3306#define for_each_flag_domain(cpu, sd, flag) \
3307 for (sd = lowest_flag_domain(cpu, flag); \
3308 (sd && (sd->flags & flag)); sd = sd->parent)
3309
3310/**
3311 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
3312 * @ilb_group: group to be checked for semi-idleness
3313 *
3314 * Returns: 1 if the group is semi-idle. 0 otherwise.
3315 *
3316 * We define a sched_group to be semi-idle if it has at least one idle CPU
3317 * and at least one non-idle CPU. This helper function checks if the given
3318 * sched_group is semi-idle or not.
3319 */
3320static inline int is_semi_idle_group(struct sched_group *ilb_group)
3321{
83cd4fe2 3322 cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
1e3c88bd
PZ
3323 sched_group_cpus(ilb_group));
3324
3325 /*
3326 * A sched_group is semi-idle when it has at least one busy cpu
3327 * and at least one idle cpu.
3328 */
83cd4fe2 3329 if (cpumask_empty(nohz.grp_idle_mask))
1e3c88bd
PZ
3330 return 0;
3331
83cd4fe2 3332 if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
1e3c88bd
PZ
3333 return 0;
3334
3335 return 1;
3336}
3337/**
3338 * find_new_ilb - Finds the optimum idle load balancer for nomination.
3339 * @cpu: The cpu which is nominating a new idle_load_balancer.
3340 *
3341 * Returns: the id of the idle load balancer if it exists,
3342 * else a value >= nr_cpu_ids.
3343 *
3344 * This algorithm picks the idle load balancer such that it belongs to a
3345 * semi-idle powersavings sched_domain. The idea is to try and avoid
3346 * completely idle packages/cores just for the purpose of idle load balancing
3347 * when there are other idle cpu's which are better suited for that job.
3348 */
3349static int find_new_ilb(int cpu)
3350{
3351 struct sched_domain *sd;
3352 struct sched_group *ilb_group;
3353
3354 /*
3355 * Have idle load balancer selection from semi-idle packages only
3356 * when power-aware load balancing is enabled
3357 */
3358 if (!(sched_smt_power_savings || sched_mc_power_savings))
3359 goto out_done;
3360
3361 /*
3362 * Optimize for the case when we have no idle CPUs or only one
3363 * idle CPU. Don't walk the sched_domain hierarchy in such cases
3364 */
83cd4fe2 3365 if (cpumask_weight(nohz.idle_cpus_mask) < 2)
1e3c88bd
PZ
3366 goto out_done;
3367
3368 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
3369 ilb_group = sd->groups;
3370
3371 do {
3372 if (is_semi_idle_group(ilb_group))
83cd4fe2 3373 return cpumask_first(nohz.grp_idle_mask);
1e3c88bd
PZ
3374
3375 ilb_group = ilb_group->next;
3376
3377 } while (ilb_group != sd->groups);
3378 }
3379
3380out_done:
83cd4fe2 3381 return nr_cpu_ids;
1e3c88bd
PZ
3382}
3383#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
3384static inline int find_new_ilb(int call_cpu)
3385{
83cd4fe2 3386 return nr_cpu_ids;
1e3c88bd
PZ
3387}
3388#endif
3389
83cd4fe2
VP
3390/*
3391 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
3392 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
3393 * CPU (if there is one).
3394 */
3395static void nohz_balancer_kick(int cpu)
3396{
3397 int ilb_cpu;
3398
3399 nohz.next_balance++;
3400
3401 ilb_cpu = get_nohz_load_balancer();
3402
3403 if (ilb_cpu >= nr_cpu_ids) {
3404 ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
3405 if (ilb_cpu >= nr_cpu_ids)
3406 return;
3407 }
3408
3409 if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
3410 struct call_single_data *cp;
3411
3412 cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
3413 cp = &per_cpu(remote_sched_softirq_cb, cpu);
3414 __smp_call_function_single(ilb_cpu, cp, 0);
3415 }
3416 return;
3417}
3418
1e3c88bd
PZ
3419/*
3420 * This routine will try to nominate the ilb (idle load balancing)
3421 * owner among the cpus whose ticks are stopped. The ilb owner will do the idle
83cd4fe2 3422 * load balancing on behalf of all those cpus.
1e3c88bd 3423 *
83cd4fe2
VP
3424 * When the ilb owner becomes busy, we will not have a new ilb owner until some
3425 * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
3426 * idle load balancing by kicking one of the idle CPUs.
1e3c88bd 3427 *
83cd4fe2
VP
3428 * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this
3429 * ilb owner CPU in the future (when there is a need for idle load balancing on
3430 * behalf of all idle CPUs).
1e3c88bd 3431 */
83cd4fe2 3432void select_nohz_load_balancer(int stop_tick)
1e3c88bd
PZ
3433{
3434 int cpu = smp_processor_id();
3435
3436 if (stop_tick) {
1e3c88bd
PZ
3437 if (!cpu_active(cpu)) {
3438 if (atomic_read(&nohz.load_balancer) != cpu)
83cd4fe2 3439 return;
1e3c88bd
PZ
3440
3441 /*
3442 * If we are going offline and still the leader,
3443 * give up!
3444 */
83cd4fe2
VP
3445 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3446 nr_cpu_ids) != cpu)
1e3c88bd
PZ
3447 BUG();
3448
83cd4fe2 3449 return;
1e3c88bd
PZ
3450 }
3451
83cd4fe2 3452 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
1e3c88bd 3453
83cd4fe2
VP
3454 if (atomic_read(&nohz.first_pick_cpu) == cpu)
3455 atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
3456 if (atomic_read(&nohz.second_pick_cpu) == cpu)
3457 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
1e3c88bd 3458
83cd4fe2 3459 if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
1e3c88bd
PZ
3460 int new_ilb;
3461
83cd4fe2
VP
3462 /* make me the ilb owner */
3463 if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
3464 cpu) != nr_cpu_ids)
3465 return;
3466
1e3c88bd
PZ
3467 /*
3468 * Check to see if there is a more power-efficient
3469 * ilb.
3470 */
3471 new_ilb = find_new_ilb(cpu);
3472 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
83cd4fe2 3473 atomic_set(&nohz.load_balancer, nr_cpu_ids);
1e3c88bd 3474 resched_cpu(new_ilb);
83cd4fe2 3475 return;
1e3c88bd 3476 }
83cd4fe2 3477 return;
1e3c88bd
PZ
3478 }
3479 } else {
83cd4fe2
VP
3480 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
3481 return;
1e3c88bd 3482
83cd4fe2 3483 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
1e3c88bd
PZ
3484
3485 if (atomic_read(&nohz.load_balancer) == cpu)
83cd4fe2
VP
3486 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3487 nr_cpu_ids) != cpu)
1e3c88bd
PZ
3488 BUG();
3489 }
83cd4fe2 3490 return;
1e3c88bd
PZ
3491}
3492#endif
3493
3494static DEFINE_SPINLOCK(balancing);
3495
3496/*
3497 * It checks each scheduling domain to see if it is due to be balanced,
3498 * and initiates a balancing operation if so.
3499 *
3500 * Balancing parameters are set up in arch_init_sched_domains.
3501 */
3502static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3503{
3504 int balance = 1;
3505 struct rq *rq = cpu_rq(cpu);
3506 unsigned long interval;
3507 struct sched_domain *sd;
3508 /* Earliest time when we have to do rebalance again */
3509 unsigned long next_balance = jiffies + 60*HZ;
3510 int update_next_balance = 0;
3511 int need_serialize;
3512
3513 for_each_domain(cpu, sd) {
3514 if (!(sd->flags & SD_LOAD_BALANCE))
3515 continue;
3516
3517 interval = sd->balance_interval;
3518 if (idle != CPU_IDLE)
3519 interval *= sd->busy_factor;
3520
3521 /* scale ms to jiffies */
3522 interval = msecs_to_jiffies(interval);
3523 if (unlikely(!interval))
3524 interval = 1;
3525 if (interval > HZ*NR_CPUS/10)
3526 interval = HZ*NR_CPUS/10;
3527
3528 need_serialize = sd->flags & SD_SERIALIZE;
3529
		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &balance)) {
				/*
				 * We've pulled tasks over so either we're no
				 * longer idle, or one of our SMT siblings is
				 * not idle.
				 */
				idle = CPU_NOT_IDLE;
			}
			sd->last_balance = jiffies;
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!balance)
			break;
	}

	/*
	 * next_balance will be updated only when there is a need.
	 * When the CPU is attached to the NULL domain, for example, it will
	 * not be updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
}

#ifdef CONFIG_NO_HZ
/*
 * In the CONFIG_NO_HZ case, the CPU that was kicked (the idle balance
 * kickee) does the rebalancing on behalf of all the CPUs whose scheduler
 * ticks are stopped.
 */
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
{
	struct rq *this_rq = cpu_rq(this_cpu);
	struct rq *rq;
	int balance_cpu;

	if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
		return;

	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
		if (balance_cpu == this_cpu)
			continue;

		/*
		 * If this cpu gets work to do, stop the load balancing
		 * work being done for other cpus. Next load
		 * balancing owner will pick it up.
		 */
		if (need_resched()) {
			this_rq->nohz_balance_kick = 0;
			break;
		}

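		/*
		 * This CPU has itself been tickless-idle, so its own rq
		 * clock and load statistics may be stale; refresh them
		 * before balancing on behalf of balance_cpu.
		 */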
		raw_spin_lock_irq(&this_rq->lock);
		update_rq_clock(this_rq);
		update_cpu_load(this_rq);
		raw_spin_unlock_irq(&this_rq->lock);

		rebalance_domains(balance_cpu, CPU_IDLE);

		rq = cpu_rq(balance_cpu);
		if (time_after(this_rq->next_balance, rq->next_balance))
			this_rq->next_balance = rq->next_balance;
	}
	nohz.next_balance = this_rq->next_balance;
	this_rq->nohz_balance_kick = 0;
}

/*
 * Current heuristic for kicking the idle load balancer:
 * - first_pick_cpu is one of the busy CPUs. It kicks the idle load
 *   balancer when it has more than one process active. This eliminates
 *   the need for idle load balancing altogether when we have only one
 *   running process in the system (the common case).
 * - If there is more than one busy CPU, the idle load balancer may have
 *   to run for active_load_balance to happen (i.e., two busy CPUs are
 *   SMT or core siblings and would run better if they moved to different
 *   physical CPUs). So second_pick_cpu is the second of the busy CPUs,
 *   and it kicks the idle load balancer as soon as it has any load.
 */
static inline int nohz_kick_needed(struct rq *rq, int cpu)
{
	unsigned long now = jiffies;
	int ret;
	int first_pick_cpu, second_pick_cpu;

	if (time_before(now, nohz.next_balance))
		return 0;

	if (!rq->nr_running)
		return 0;

	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
	second_pick_cpu = atomic_read(&nohz.second_pick_cpu);

	if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
	    second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
		return 0;

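	/*
	 * Try to claim the first_pick slot. If this CPU is (or already was)
	 * the first pick, drop any stale second_pick reference to it and
	 * kick the ilb once more than one task is runnable here. Otherwise
	 * try to become the second pick and kick as soon as any load exists.
	 */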
	ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
	if (ret == nr_cpu_ids || ret == cpu) {
		atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
		if (rq->nr_running > 1)
			return 1;
	} else {
		ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
		if (ret == nr_cpu_ids || ret == cpu) {
			if (rq->nr_running)
				return 1;
		}
	}
	return 0;
}
#else
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif

/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balance_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct rq *this_rq = cpu_rq(this_cpu);
	enum cpu_idle_type idle = this_rq->idle_at_tick ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_cpu, idle);

	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
	nohz_idle_balance(this_cpu, idle);
}

static inline int on_null_domain(int cpu)
{
	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
static inline void trigger_load_balance(struct rq *rq, int cpu)
{
	/* Don't need to rebalance while attached to NULL domain */
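	/*
	 * When this CPU's own next_balance time has passed, raise
	 * SCHED_SOFTIRQ for ordinary periodic balancing; otherwise, under
	 * CONFIG_NO_HZ, kick the nohz idle load balancer if
	 * nohz_kick_needed() reports that the tickless CPUs need balancing
	 * done on their behalf.
	 */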
	if (time_after_eq(jiffies, rq->next_balance) &&
	    likely(!on_null_domain(cpu)))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ
	else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
		nohz_balancer_kick(cpu);
#endif
}

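/*
 * The scaled tunables (granularities, latency) depend on the number of
 * online CPUs, so re-evaluate them whenever a runqueue goes on or off line.
 */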
static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();
}

#else /* CONFIG_SMP */

/*
 * on UP we do not need to balance between CPUs:
 */
static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

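	/*
	 * Run the per-entity tick for this task and, with group scheduling,
	 * for each of its ancestors in the group hierarchy.
	 */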
	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(current);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (unlikely(task_cpu(p) != this_cpu))
		__set_task_cpu(p, this_cpu);

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

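	/*
	 * The child is not on any runqueue yet: store its vruntime relative
	 * to this cfs_rq's min_vruntime so that enqueue_entity() can add the
	 * min_vruntime of whichever CPU it is finally placed on at wakeup.
	 */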
	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_fair(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void moved_group_fair(struct task_struct *p, int on_rq)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);

	update_curr(cfs_rq);
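	/*
	 * A task that is not currently queued gets a fresh initial
	 * placement on its new group's runqueue.
	 */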
	if (!on_rq)
		place_entity(cfs_rq, &p->se, 1);
}
#endif

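/*
 * Used by the sched_rr_get_interval() system call: report the task's CFS
 * slice, converted to jiffies, as its round-robin interval.
 */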
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.moved_group		= moved_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
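/*
 * Dump every leaf cfs_rq of the given CPU; used by the scheduler debug
 * output.
 */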
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif