]> bbs.cooldavid.org Git - net-next-2.6.git/blame - kernel/sched.c
kprobes: Remove redundant address check
[net-next-2.6.git] / kernel / sched.c
CommitLineData
1da177e4
LT
1/*
2 * kernel/sched.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
c31f2e8a
IM
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
b9131769
IM
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
1da177e4
LT
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
dff06c15 33#include <linux/uaccess.h>
1da177e4
LT
34#include <linux/highmem.h>
35#include <linux/smp_lock.h>
36#include <asm/mmu_context.h>
37#include <linux/interrupt.h>
c59ede7b 38#include <linux/capability.h>
1da177e4
LT
39#include <linux/completion.h>
40#include <linux/kernel_stat.h>
9a11b49a 41#include <linux/debug_locks.h>
cdd6c482 42#include <linux/perf_event.h>
1da177e4
LT
43#include <linux/security.h>
44#include <linux/notifier.h>
45#include <linux/profile.h>
7dfb7103 46#include <linux/freezer.h>
198e2f18 47#include <linux/vmalloc.h>
1da177e4
LT
48#include <linux/blkdev.h>
49#include <linux/delay.h>
b488893a 50#include <linux/pid_namespace.h>
1da177e4
LT
51#include <linux/smp.h>
52#include <linux/threads.h>
53#include <linux/timer.h>
54#include <linux/rcupdate.h>
55#include <linux/cpu.h>
56#include <linux/cpuset.h>
57#include <linux/percpu.h>
b5aadf7f 58#include <linux/proc_fs.h>
1da177e4 59#include <linux/seq_file.h>
969c7921 60#include <linux/stop_machine.h>
e692ab53 61#include <linux/sysctl.h>
1da177e4
LT
62#include <linux/syscalls.h>
63#include <linux/times.h>
8f0ab514 64#include <linux/tsacct_kern.h>
c6fd91f0 65#include <linux/kprobes.h>
0ff92245 66#include <linux/delayacct.h>
dff06c15 67#include <linux/unistd.h>
f5ff8422 68#include <linux/pagemap.h>
8f4d37ec 69#include <linux/hrtimer.h>
30914a58 70#include <linux/tick.h>
f00b45c1
PZ
71#include <linux/debugfs.h>
72#include <linux/ctype.h>
6cd8a4bb 73#include <linux/ftrace.h>
5a0e3ad6 74#include <linux/slab.h>
1da177e4 75
5517d86b 76#include <asm/tlb.h>
838225b4 77#include <asm/irq_regs.h>
1da177e4 78
6e0534f2 79#include "sched_cpupri.h"
21aa9af0 80#include "workqueue_sched.h"
6e0534f2 81
a8d154b0 82#define CREATE_TRACE_POINTS
ad8d75ff 83#include <trace/events/sched.h>
a8d154b0 84
1da177e4
LT
85/*
86 * Convert user-nice values [ -20 ... 0 ... 19 ]
87 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
88 * and back.
89 */
90#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
91#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
92#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
93
94/*
95 * 'User priority' is the nice value converted to something we
96 * can work with better when scaling various scheduler parameters,
97 * it's a [ 0 ... 39 ] range.
98 */
99#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
100#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
101#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
102
103/*
d7876a08 104 * Helpers for converting nanosecond timing to jiffy resolution
1da177e4 105 */
d6322faf 106#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
1da177e4 107
6aa645ea
IM
108#define NICE_0_LOAD SCHED_LOAD_SCALE
109#define NICE_0_SHIFT SCHED_LOAD_SHIFT
110
1da177e4
LT
111/*
112 * These are the 'tuning knobs' of the scheduler:
113 *
a4ec24b4 114 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
1da177e4
LT
115 * Timeslices get refilled after they expire.
116 */
1da177e4 117#define DEF_TIMESLICE (100 * HZ / 1000)
2dd73a4f 118
d0b27fa7
PZ
119/*
120 * single value that denotes runtime == period, ie unlimited time.
121 */
122#define RUNTIME_INF ((u64)~0ULL)
123
e05606d3
IM
124static inline int rt_policy(int policy)
125{
3f33a7ce 126 if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
e05606d3
IM
127 return 1;
128 return 0;
129}
130
131static inline int task_has_rt_policy(struct task_struct *p)
132{
133 return rt_policy(p->policy);
134}
135
1da177e4 136/*
6aa645ea 137 * This is the priority-queue data structure of the RT scheduling class:
1da177e4 138 */
6aa645ea
IM
139struct rt_prio_array {
140 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
141 struct list_head queue[MAX_RT_PRIO];
142};
143
d0b27fa7 144struct rt_bandwidth {
ea736ed5 145 /* nests inside the rq lock: */
0986b11b 146 raw_spinlock_t rt_runtime_lock;
ea736ed5
IM
147 ktime_t rt_period;
148 u64 rt_runtime;
149 struct hrtimer rt_period_timer;
d0b27fa7
PZ
150};
151
152static struct rt_bandwidth def_rt_bandwidth;
153
154static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
155
156static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
157{
158 struct rt_bandwidth *rt_b =
159 container_of(timer, struct rt_bandwidth, rt_period_timer);
160 ktime_t now;
161 int overrun;
162 int idle = 0;
163
164 for (;;) {
165 now = hrtimer_cb_get_time(timer);
166 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
167
168 if (!overrun)
169 break;
170
171 idle = do_sched_rt_period_timer(rt_b, overrun);
172 }
173
174 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
175}
176
177static
178void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
179{
180 rt_b->rt_period = ns_to_ktime(period);
181 rt_b->rt_runtime = runtime;
182
0986b11b 183 raw_spin_lock_init(&rt_b->rt_runtime_lock);
ac086bc2 184
d0b27fa7
PZ
185 hrtimer_init(&rt_b->rt_period_timer,
186 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
187 rt_b->rt_period_timer.function = sched_rt_period_timer;
d0b27fa7
PZ
188}
189
c8bfff6d
KH
190static inline int rt_bandwidth_enabled(void)
191{
192 return sysctl_sched_rt_runtime >= 0;
d0b27fa7
PZ
193}
194
195static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
196{
197 ktime_t now;
198
cac64d00 199 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
d0b27fa7
PZ
200 return;
201
202 if (hrtimer_active(&rt_b->rt_period_timer))
203 return;
204
0986b11b 205 raw_spin_lock(&rt_b->rt_runtime_lock);
d0b27fa7 206 for (;;) {
7f1e2ca9
PZ
207 unsigned long delta;
208 ktime_t soft, hard;
209
d0b27fa7
PZ
210 if (hrtimer_active(&rt_b->rt_period_timer))
211 break;
212
213 now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
214 hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
7f1e2ca9
PZ
215
216 soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
217 hard = hrtimer_get_expires(&rt_b->rt_period_timer);
218 delta = ktime_to_ns(ktime_sub(hard, soft));
219 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
5c333864 220 HRTIMER_MODE_ABS_PINNED, 0);
d0b27fa7 221 }
0986b11b 222 raw_spin_unlock(&rt_b->rt_runtime_lock);
d0b27fa7
PZ
223}
224
225#ifdef CONFIG_RT_GROUP_SCHED
226static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
227{
228 hrtimer_cancel(&rt_b->rt_period_timer);
229}
230#endif
231
712555ee
HC
232/*
233 * sched_domains_mutex serializes calls to arch_init_sched_domains,
234 * detach_destroy_domains and partition_sched_domains.
235 */
236static DEFINE_MUTEX(sched_domains_mutex);
237
7c941438 238#ifdef CONFIG_CGROUP_SCHED
29f59db3 239
68318b8e
SV
240#include <linux/cgroup.h>
241
29f59db3
SV
242struct cfs_rq;
243
6f505b16
PZ
244static LIST_HEAD(task_groups);
245
29f59db3 246/* task group related information */
4cf86d77 247struct task_group {
68318b8e 248 struct cgroup_subsys_state css;
6c415b92 249
052f1dc7 250#ifdef CONFIG_FAIR_GROUP_SCHED
29f59db3
SV
251 /* schedulable entities of this group on each cpu */
252 struct sched_entity **se;
253 /* runqueue "owned" by this group on each cpu */
254 struct cfs_rq **cfs_rq;
255 unsigned long shares;
052f1dc7
PZ
256#endif
257
258#ifdef CONFIG_RT_GROUP_SCHED
259 struct sched_rt_entity **rt_se;
260 struct rt_rq **rt_rq;
261
d0b27fa7 262 struct rt_bandwidth rt_bandwidth;
052f1dc7 263#endif
6b2d7700 264
ae8393e5 265 struct rcu_head rcu;
6f505b16 266 struct list_head list;
f473aa5e
PZ
267
268 struct task_group *parent;
269 struct list_head siblings;
270 struct list_head children;
29f59db3
SV
271};
272
eff766a6 273#define root_task_group init_task_group
6f505b16 274
8ed36996 275/* task_group_lock serializes add/remove of task groups and also changes to
ec2c507f
SV
276 * a task group's cpu shares.
277 */
8ed36996 278static DEFINE_SPINLOCK(task_group_lock);
ec2c507f 279
e9036b36
CG
280#ifdef CONFIG_FAIR_GROUP_SCHED
281
57310a98
PZ
282#ifdef CONFIG_SMP
283static int root_task_group_empty(void)
284{
285 return list_empty(&root_task_group.children);
286}
287#endif
288
052f1dc7 289# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
052f1dc7 290
cb4ad1ff 291/*
2e084786
LJ
292 * A weight of 0 or 1 can cause arithmetics problems.
293 * A weight of a cfs_rq is the sum of weights of which entities
294 * are queued on this cfs_rq, so a weight of a entity should not be
295 * too large, so as the shares value of a task group.
cb4ad1ff
MX
296 * (The default weight is 1024 - so there's no practical
297 * limitation from this.)
298 */
18d95a28 299#define MIN_SHARES 2
2e084786 300#define MAX_SHARES (1UL << 18)
18d95a28 301
052f1dc7
PZ
302static int init_task_group_load = INIT_TASK_GROUP_LOAD;
303#endif
304
29f59db3 305/* Default task group.
3a252015 306 * Every task in system belong to this group at bootup.
29f59db3 307 */
434d53b0 308struct task_group init_task_group;
29f59db3 309
7c941438 310#endif /* CONFIG_CGROUP_SCHED */
29f59db3 311
6aa645ea
IM
312/* CFS-related fields in a runqueue */
313struct cfs_rq {
314 struct load_weight load;
315 unsigned long nr_running;
316
6aa645ea 317 u64 exec_clock;
e9acbff6 318 u64 min_vruntime;
6aa645ea
IM
319
320 struct rb_root tasks_timeline;
321 struct rb_node *rb_leftmost;
4a55bd5e
PZ
322
323 struct list_head tasks;
324 struct list_head *balance_iterator;
325
326 /*
327 * 'curr' points to currently running entity on this cfs_rq.
6aa645ea
IM
328 * It is set to NULL otherwise (i.e when none are currently running).
329 */
4793241b 330 struct sched_entity *curr, *next, *last;
ddc97297 331
5ac5c4d6 332 unsigned int nr_spread_over;
ddc97297 333
62160e3f 334#ifdef CONFIG_FAIR_GROUP_SCHED
6aa645ea
IM
335 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
336
41a2d6cf
IM
337 /*
338 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
6aa645ea
IM
339 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
340 * (like users, containers etc.)
341 *
342 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
343 * list is used during load balance.
344 */
41a2d6cf
IM
345 struct list_head leaf_cfs_rq_list;
346 struct task_group *tg; /* group that "owns" this runqueue */
c09595f6
PZ
347
348#ifdef CONFIG_SMP
c09595f6 349 /*
c8cba857 350 * the part of load.weight contributed by tasks
c09595f6 351 */
c8cba857 352 unsigned long task_weight;
c09595f6 353
c8cba857
PZ
354 /*
355 * h_load = weight * f(tg)
356 *
357 * Where f(tg) is the recursive weight fraction assigned to
358 * this group.
359 */
360 unsigned long h_load;
c09595f6 361
c8cba857
PZ
362 /*
363 * this cpu's part of tg->shares
364 */
365 unsigned long shares;
f1d239f7
PZ
366
367 /*
368 * load.weight at the time we set shares
369 */
370 unsigned long rq_weight;
c09595f6 371#endif
6aa645ea
IM
372#endif
373};
1da177e4 374
6aa645ea
IM
375/* Real-Time classes' related field in a runqueue: */
376struct rt_rq {
377 struct rt_prio_array active;
63489e45 378 unsigned long rt_nr_running;
052f1dc7 379#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
e864c499
GH
380 struct {
381 int curr; /* highest queued rt task prio */
398a153b 382#ifdef CONFIG_SMP
e864c499 383 int next; /* next highest */
398a153b 384#endif
e864c499 385 } highest_prio;
6f505b16 386#endif
fa85ae24 387#ifdef CONFIG_SMP
73fe6aae 388 unsigned long rt_nr_migratory;
a1ba4d8b 389 unsigned long rt_nr_total;
a22d7fc1 390 int overloaded;
917b627d 391 struct plist_head pushable_tasks;
fa85ae24 392#endif
6f505b16 393 int rt_throttled;
fa85ae24 394 u64 rt_time;
ac086bc2 395 u64 rt_runtime;
ea736ed5 396 /* Nests inside the rq lock: */
0986b11b 397 raw_spinlock_t rt_runtime_lock;
6f505b16 398
052f1dc7 399#ifdef CONFIG_RT_GROUP_SCHED
23b0fdfc
PZ
400 unsigned long rt_nr_boosted;
401
6f505b16
PZ
402 struct rq *rq;
403 struct list_head leaf_rt_rq_list;
404 struct task_group *tg;
6f505b16 405#endif
6aa645ea
IM
406};
407
57d885fe
GH
408#ifdef CONFIG_SMP
409
410/*
411 * We add the notion of a root-domain which will be used to define per-domain
0eab9146
IM
412 * variables. Each exclusive cpuset essentially defines an island domain by
413 * fully partitioning the member cpus from any other cpuset. Whenever a new
57d885fe
GH
414 * exclusive cpuset is created, we also create and attach a new root-domain
415 * object.
416 *
57d885fe
GH
417 */
418struct root_domain {
419 atomic_t refcount;
c6c4927b
RR
420 cpumask_var_t span;
421 cpumask_var_t online;
637f5085 422
0eab9146 423 /*
637f5085
GH
424 * The "RT overload" flag: it gets set if a CPU has more than
425 * one runnable RT task.
426 */
c6c4927b 427 cpumask_var_t rto_mask;
0eab9146 428 atomic_t rto_count;
6e0534f2
GH
429#ifdef CONFIG_SMP
430 struct cpupri cpupri;
431#endif
57d885fe
GH
432};
433
dc938520
GH
434/*
435 * By default the system creates a single root-domain with all cpus as
436 * members (mimicking the global state we have today).
437 */
57d885fe
GH
438static struct root_domain def_root_domain;
439
440#endif
441
1da177e4
LT
442/*
443 * This is the main, per-CPU runqueue data structure.
444 *
445 * Locking rule: those places that want to lock multiple runqueues
446 * (such as the load balancing or the thread migration code), lock
447 * acquire operations must be ordered by ascending &runqueue.
448 */
70b97a7f 449struct rq {
d8016491 450 /* runqueue lock: */
05fa785c 451 raw_spinlock_t lock;
1da177e4
LT
452
453 /*
454 * nr_running and cpu_load should be in the same cacheline because
455 * remote CPUs use both these fields when doing load calculation.
456 */
457 unsigned long nr_running;
6aa645ea
IM
458 #define CPU_LOAD_IDX_MAX 5
459 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
fdf3e95d 460 unsigned long last_load_update_tick;
46cb4b7c 461#ifdef CONFIG_NO_HZ
39c0cbe2 462 u64 nohz_stamp;
83cd4fe2 463 unsigned char nohz_balance_kick;
46cb4b7c 464#endif
a64692a3
MG
465 unsigned int skip_clock_update;
466
d8016491
IM
467 /* capture load from *all* tasks on this cpu: */
468 struct load_weight load;
6aa645ea
IM
469 unsigned long nr_load_updates;
470 u64 nr_switches;
471
472 struct cfs_rq cfs;
6f505b16 473 struct rt_rq rt;
6f505b16 474
6aa645ea 475#ifdef CONFIG_FAIR_GROUP_SCHED
d8016491
IM
476 /* list of leaf cfs_rq on this cpu: */
477 struct list_head leaf_cfs_rq_list;
052f1dc7
PZ
478#endif
479#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 480 struct list_head leaf_rt_rq_list;
1da177e4 481#endif
1da177e4
LT
482
483 /*
484 * This is part of a global counter where only the total sum
485 * over all CPUs matters. A task can increase this counter on
486 * one CPU and if it got migrated afterwards it may decrease
487 * it on another CPU. Always updated under the runqueue lock:
488 */
489 unsigned long nr_uninterruptible;
490
36c8b586 491 struct task_struct *curr, *idle;
c9819f45 492 unsigned long next_balance;
1da177e4 493 struct mm_struct *prev_mm;
6aa645ea 494
3e51f33f 495 u64 clock;
6aa645ea 496
1da177e4
LT
497 atomic_t nr_iowait;
498
499#ifdef CONFIG_SMP
0eab9146 500 struct root_domain *rd;
1da177e4
LT
501 struct sched_domain *sd;
502
e51fd5e2
PZ
503 unsigned long cpu_power;
504
a0a522ce 505 unsigned char idle_at_tick;
1da177e4 506 /* For active balancing */
3f029d3c 507 int post_schedule;
1da177e4
LT
508 int active_balance;
509 int push_cpu;
969c7921 510 struct cpu_stop_work active_balance_work;
d8016491
IM
511 /* cpu of this runqueue: */
512 int cpu;
1f11eb6a 513 int online;
1da177e4 514
a8a51d5e 515 unsigned long avg_load_per_task;
1da177e4 516
e9e9250b
PZ
517 u64 rt_avg;
518 u64 age_stamp;
1b9508f6
MG
519 u64 idle_stamp;
520 u64 avg_idle;
1da177e4
LT
521#endif
522
dce48a84
TG
523 /* calc_load related fields */
524 unsigned long calc_load_update;
525 long calc_load_active;
526
8f4d37ec 527#ifdef CONFIG_SCHED_HRTICK
31656519
PZ
528#ifdef CONFIG_SMP
529 int hrtick_csd_pending;
530 struct call_single_data hrtick_csd;
531#endif
8f4d37ec
PZ
532 struct hrtimer hrtick_timer;
533#endif
534
1da177e4
LT
535#ifdef CONFIG_SCHEDSTATS
536 /* latency stats */
537 struct sched_info rq_sched_info;
9c2c4802
KC
538 unsigned long long rq_cpu_time;
539 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
1da177e4
LT
540
541 /* sys_sched_yield() stats */
480b9434 542 unsigned int yld_count;
1da177e4
LT
543
544 /* schedule() stats */
480b9434
KC
545 unsigned int sched_switch;
546 unsigned int sched_count;
547 unsigned int sched_goidle;
1da177e4
LT
548
549 /* try_to_wake_up() stats */
480b9434
KC
550 unsigned int ttwu_count;
551 unsigned int ttwu_local;
b8efb561
IM
552
553 /* BKL stats */
480b9434 554 unsigned int bkl_count;
1da177e4
LT
555#endif
556};
557
f34e3b61 558static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1da177e4 559
7d478721
PZ
560static inline
561void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
dd41f596 562{
7d478721 563 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
a64692a3
MG
564
565 /*
566 * A queue event has occurred, and we're going to schedule. In
567 * this case, we can save a useless back to back clock update.
568 */
569 if (test_tsk_need_resched(p))
570 rq->skip_clock_update = 1;
dd41f596
IM
571}
572
0a2966b4
CL
573static inline int cpu_of(struct rq *rq)
574{
575#ifdef CONFIG_SMP
576 return rq->cpu;
577#else
578 return 0;
579#endif
580}
581
497f0ab3 582#define rcu_dereference_check_sched_domain(p) \
d11c563d
PM
583 rcu_dereference_check((p), \
584 rcu_read_lock_sched_held() || \
585 lockdep_is_held(&sched_domains_mutex))
586
674311d5
NP
587/*
588 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
1a20ff27 589 * See detach_destroy_domains: synchronize_sched for details.
674311d5
NP
590 *
591 * The domain tree of any CPU may only be accessed from within
592 * preempt-disabled sections.
593 */
48f24c4d 594#define for_each_domain(cpu, __sd) \
497f0ab3 595 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
1da177e4
LT
596
597#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
598#define this_rq() (&__get_cpu_var(runqueues))
599#define task_rq(p) cpu_rq(task_cpu(p))
600#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
54d35f29 601#define raw_rq() (&__raw_get_cpu_var(runqueues))
1da177e4 602
dc61b1d6
PZ
603#ifdef CONFIG_CGROUP_SCHED
604
605/*
606 * Return the group to which this tasks belongs.
607 *
608 * We use task_subsys_state_check() and extend the RCU verification
609 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
610 * holds that lock for each task it moves into the cgroup. Therefore
611 * by holding that lock, we pin the task to the current cgroup.
612 */
613static inline struct task_group *task_group(struct task_struct *p)
614{
615 struct cgroup_subsys_state *css;
616
617 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
618 lockdep_is_held(&task_rq(p)->lock));
619 return container_of(css, struct task_group, css);
620}
621
622/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
623static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
624{
625#ifdef CONFIG_FAIR_GROUP_SCHED
626 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
627 p->se.parent = task_group(p)->se[cpu];
628#endif
629
630#ifdef CONFIG_RT_GROUP_SCHED
631 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
632 p->rt.parent = task_group(p)->rt_se[cpu];
633#endif
634}
635
636#else /* CONFIG_CGROUP_SCHED */
637
638static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
639static inline struct task_group *task_group(struct task_struct *p)
640{
641 return NULL;
642}
643
644#endif /* CONFIG_CGROUP_SCHED */
645
aa9c4c0f 646inline void update_rq_clock(struct rq *rq)
3e51f33f 647{
a64692a3
MG
648 if (!rq->skip_clock_update)
649 rq->clock = sched_clock_cpu(cpu_of(rq));
3e51f33f
PZ
650}
651
bf5c91ba
IM
652/*
653 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
654 */
655#ifdef CONFIG_SCHED_DEBUG
656# define const_debug __read_mostly
657#else
658# define const_debug static const
659#endif
660
017730c1
IM
661/**
662 * runqueue_is_locked
e17b38bf 663 * @cpu: the processor in question.
017730c1
IM
664 *
665 * Returns true if the current cpu runqueue is locked.
666 * This interface allows printk to be called with the runqueue lock
667 * held and know whether or not it is OK to wake up the klogd.
668 */
89f19f04 669int runqueue_is_locked(int cpu)
017730c1 670{
05fa785c 671 return raw_spin_is_locked(&cpu_rq(cpu)->lock);
017730c1
IM
672}
673
bf5c91ba
IM
674/*
675 * Debugging: various feature bits
676 */
f00b45c1
PZ
677
678#define SCHED_FEAT(name, enabled) \
679 __SCHED_FEAT_##name ,
680
bf5c91ba 681enum {
f00b45c1 682#include "sched_features.h"
bf5c91ba
IM
683};
684
f00b45c1
PZ
685#undef SCHED_FEAT
686
687#define SCHED_FEAT(name, enabled) \
688 (1UL << __SCHED_FEAT_##name) * enabled |
689
bf5c91ba 690const_debug unsigned int sysctl_sched_features =
f00b45c1
PZ
691#include "sched_features.h"
692 0;
693
694#undef SCHED_FEAT
695
696#ifdef CONFIG_SCHED_DEBUG
697#define SCHED_FEAT(name, enabled) \
698 #name ,
699
983ed7a6 700static __read_mostly char *sched_feat_names[] = {
f00b45c1
PZ
701#include "sched_features.h"
702 NULL
703};
704
705#undef SCHED_FEAT
706
34f3a814 707static int sched_feat_show(struct seq_file *m, void *v)
f00b45c1 708{
f00b45c1
PZ
709 int i;
710
711 for (i = 0; sched_feat_names[i]; i++) {
34f3a814
LZ
712 if (!(sysctl_sched_features & (1UL << i)))
713 seq_puts(m, "NO_");
714 seq_printf(m, "%s ", sched_feat_names[i]);
f00b45c1 715 }
34f3a814 716 seq_puts(m, "\n");
f00b45c1 717
34f3a814 718 return 0;
f00b45c1
PZ
719}
720
721static ssize_t
722sched_feat_write(struct file *filp, const char __user *ubuf,
723 size_t cnt, loff_t *ppos)
724{
725 char buf[64];
726 char *cmp = buf;
727 int neg = 0;
728 int i;
729
730 if (cnt > 63)
731 cnt = 63;
732
733 if (copy_from_user(&buf, ubuf, cnt))
734 return -EFAULT;
735
736 buf[cnt] = 0;
737
c24b7c52 738 if (strncmp(buf, "NO_", 3) == 0) {
f00b45c1
PZ
739 neg = 1;
740 cmp += 3;
741 }
742
743 for (i = 0; sched_feat_names[i]; i++) {
744 int len = strlen(sched_feat_names[i]);
745
746 if (strncmp(cmp, sched_feat_names[i], len) == 0) {
747 if (neg)
748 sysctl_sched_features &= ~(1UL << i);
749 else
750 sysctl_sched_features |= (1UL << i);
751 break;
752 }
753 }
754
755 if (!sched_feat_names[i])
756 return -EINVAL;
757
42994724 758 *ppos += cnt;
f00b45c1
PZ
759
760 return cnt;
761}
762
34f3a814
LZ
763static int sched_feat_open(struct inode *inode, struct file *filp)
764{
765 return single_open(filp, sched_feat_show, NULL);
766}
767
828c0950 768static const struct file_operations sched_feat_fops = {
34f3a814
LZ
769 .open = sched_feat_open,
770 .write = sched_feat_write,
771 .read = seq_read,
772 .llseek = seq_lseek,
773 .release = single_release,
f00b45c1
PZ
774};
775
776static __init int sched_init_debug(void)
777{
f00b45c1
PZ
778 debugfs_create_file("sched_features", 0644, NULL, NULL,
779 &sched_feat_fops);
780
781 return 0;
782}
783late_initcall(sched_init_debug);
784
785#endif
786
787#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
bf5c91ba 788
b82d9fdd
PZ
789/*
790 * Number of tasks to iterate in a single balance run.
791 * Limited because this is done with IRQs disabled.
792 */
793const_debug unsigned int sysctl_sched_nr_migrate = 32;
794
2398f2c6
PZ
795/*
796 * ratelimit for updating the group shares.
55cd5340 797 * default: 0.25ms
2398f2c6 798 */
55cd5340 799unsigned int sysctl_sched_shares_ratelimit = 250000;
0bcdcf28 800unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
2398f2c6 801
ffda12a1
PZ
802/*
803 * Inject some fuzzyness into changing the per-cpu group shares
804 * this avoids remote rq-locks at the expense of fairness.
805 * default: 4
806 */
807unsigned int sysctl_sched_shares_thresh = 4;
808
e9e9250b
PZ
809/*
810 * period over which we average the RT time consumption, measured
811 * in ms.
812 *
813 * default: 1s
814 */
815const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
816
fa85ae24 817/*
9f0c1e56 818 * period over which we measure -rt task cpu usage in us.
fa85ae24
PZ
819 * default: 1s
820 */
9f0c1e56 821unsigned int sysctl_sched_rt_period = 1000000;
fa85ae24 822
6892b75e
IM
823static __read_mostly int scheduler_running;
824
9f0c1e56
PZ
825/*
826 * part of the period that we allow rt tasks to run in us.
827 * default: 0.95s
828 */
829int sysctl_sched_rt_runtime = 950000;
fa85ae24 830
d0b27fa7
PZ
831static inline u64 global_rt_period(void)
832{
833 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
834}
835
836static inline u64 global_rt_runtime(void)
837{
e26873bb 838 if (sysctl_sched_rt_runtime < 0)
d0b27fa7
PZ
839 return RUNTIME_INF;
840
841 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
842}
fa85ae24 843
1da177e4 844#ifndef prepare_arch_switch
4866cde0
NP
845# define prepare_arch_switch(next) do { } while (0)
846#endif
847#ifndef finish_arch_switch
848# define finish_arch_switch(prev) do { } while (0)
849#endif
850
051a1d1a
DA
851static inline int task_current(struct rq *rq, struct task_struct *p)
852{
853 return rq->curr == p;
854}
855
4866cde0 856#ifndef __ARCH_WANT_UNLOCKED_CTXSW
70b97a7f 857static inline int task_running(struct rq *rq, struct task_struct *p)
4866cde0 858{
051a1d1a 859 return task_current(rq, p);
4866cde0
NP
860}
861
70b97a7f 862static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
4866cde0
NP
863{
864}
865
70b97a7f 866static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
4866cde0 867{
da04c035
IM
868#ifdef CONFIG_DEBUG_SPINLOCK
869 /* this is a valid case when another task releases the spinlock */
870 rq->lock.owner = current;
871#endif
8a25d5de
IM
872 /*
873 * If we are tracking spinlock dependencies then we have to
874 * fix up the runqueue lock - which gets 'carried over' from
875 * prev into current:
876 */
877 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
878
05fa785c 879 raw_spin_unlock_irq(&rq->lock);
4866cde0
NP
880}
881
882#else /* __ARCH_WANT_UNLOCKED_CTXSW */
70b97a7f 883static inline int task_running(struct rq *rq, struct task_struct *p)
4866cde0
NP
884{
885#ifdef CONFIG_SMP
886 return p->oncpu;
887#else
051a1d1a 888 return task_current(rq, p);
4866cde0
NP
889#endif
890}
891
70b97a7f 892static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
4866cde0
NP
893{
894#ifdef CONFIG_SMP
895 /*
896 * We can optimise this out completely for !SMP, because the
897 * SMP rebalancing from interrupt is the only thing that cares
898 * here.
899 */
900 next->oncpu = 1;
901#endif
902#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
05fa785c 903 raw_spin_unlock_irq(&rq->lock);
4866cde0 904#else
05fa785c 905 raw_spin_unlock(&rq->lock);
4866cde0
NP
906#endif
907}
908
70b97a7f 909static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
4866cde0
NP
910{
911#ifdef CONFIG_SMP
912 /*
913 * After ->oncpu is cleared, the task can be moved to a different CPU.
914 * We must ensure this doesn't happen until the switch is completely
915 * finished.
916 */
917 smp_wmb();
918 prev->oncpu = 0;
919#endif
920#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
921 local_irq_enable();
1da177e4 922#endif
4866cde0
NP
923}
924#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
1da177e4 925
0970d299 926/*
65cc8e48
PZ
927 * Check whether the task is waking, we use this to synchronize ->cpus_allowed
928 * against ttwu().
0970d299
PZ
929 */
930static inline int task_is_waking(struct task_struct *p)
931{
0017d735 932 return unlikely(p->state == TASK_WAKING);
0970d299
PZ
933}
934
b29739f9
IM
935/*
936 * __task_rq_lock - lock the runqueue a given task resides on.
937 * Must be called interrupts disabled.
938 */
70b97a7f 939static inline struct rq *__task_rq_lock(struct task_struct *p)
b29739f9
IM
940 __acquires(rq->lock)
941{
0970d299
PZ
942 struct rq *rq;
943
3a5c359a 944 for (;;) {
0970d299 945 rq = task_rq(p);
05fa785c 946 raw_spin_lock(&rq->lock);
65cc8e48 947 if (likely(rq == task_rq(p)))
3a5c359a 948 return rq;
05fa785c 949 raw_spin_unlock(&rq->lock);
b29739f9 950 }
b29739f9
IM
951}
952
1da177e4
LT
953/*
954 * task_rq_lock - lock the runqueue a given task resides on and disable
41a2d6cf 955 * interrupts. Note the ordering: we can safely lookup the task_rq without
1da177e4
LT
956 * explicitly disabling preemption.
957 */
70b97a7f 958static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
1da177e4
LT
959 __acquires(rq->lock)
960{
70b97a7f 961 struct rq *rq;
1da177e4 962
3a5c359a
AK
963 for (;;) {
964 local_irq_save(*flags);
965 rq = task_rq(p);
05fa785c 966 raw_spin_lock(&rq->lock);
65cc8e48 967 if (likely(rq == task_rq(p)))
3a5c359a 968 return rq;
05fa785c 969 raw_spin_unlock_irqrestore(&rq->lock, *flags);
1da177e4 970 }
1da177e4
LT
971}
972
a9957449 973static void __task_rq_unlock(struct rq *rq)
b29739f9
IM
974 __releases(rq->lock)
975{
05fa785c 976 raw_spin_unlock(&rq->lock);
b29739f9
IM
977}
978
70b97a7f 979static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
1da177e4
LT
980 __releases(rq->lock)
981{
05fa785c 982 raw_spin_unlock_irqrestore(&rq->lock, *flags);
1da177e4
LT
983}
984
1da177e4 985/*
cc2a73b5 986 * this_rq_lock - lock this runqueue and disable interrupts.
1da177e4 987 */
a9957449 988static struct rq *this_rq_lock(void)
1da177e4
LT
989 __acquires(rq->lock)
990{
70b97a7f 991 struct rq *rq;
1da177e4
LT
992
993 local_irq_disable();
994 rq = this_rq();
05fa785c 995 raw_spin_lock(&rq->lock);
1da177e4
LT
996
997 return rq;
998}
999
8f4d37ec
PZ
1000#ifdef CONFIG_SCHED_HRTICK
1001/*
1002 * Use HR-timers to deliver accurate preemption points.
1003 *
1004 * Its all a bit involved since we cannot program an hrt while holding the
1005 * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a
1006 * reschedule event.
1007 *
1008 * When we get rescheduled we reprogram the hrtick_timer outside of the
1009 * rq->lock.
1010 */
8f4d37ec
PZ
1011
1012/*
1013 * Use hrtick when:
1014 * - enabled by features
1015 * - hrtimer is actually high res
1016 */
1017static inline int hrtick_enabled(struct rq *rq)
1018{
1019 if (!sched_feat(HRTICK))
1020 return 0;
ba42059f 1021 if (!cpu_active(cpu_of(rq)))
b328ca18 1022 return 0;
8f4d37ec
PZ
1023 return hrtimer_is_hres_active(&rq->hrtick_timer);
1024}
1025
8f4d37ec
PZ
1026static void hrtick_clear(struct rq *rq)
1027{
1028 if (hrtimer_active(&rq->hrtick_timer))
1029 hrtimer_cancel(&rq->hrtick_timer);
1030}
1031
8f4d37ec
PZ
1032/*
1033 * High-resolution timer tick.
1034 * Runs from hardirq context with interrupts disabled.
1035 */
1036static enum hrtimer_restart hrtick(struct hrtimer *timer)
1037{
1038 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
1039
1040 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1041
05fa785c 1042 raw_spin_lock(&rq->lock);
3e51f33f 1043 update_rq_clock(rq);
8f4d37ec 1044 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
05fa785c 1045 raw_spin_unlock(&rq->lock);
8f4d37ec
PZ
1046
1047 return HRTIMER_NORESTART;
1048}
1049
95e904c7 1050#ifdef CONFIG_SMP
31656519
PZ
1051/*
1052 * called from hardirq (IPI) context
1053 */
1054static void __hrtick_start(void *arg)
b328ca18 1055{
31656519 1056 struct rq *rq = arg;
b328ca18 1057
05fa785c 1058 raw_spin_lock(&rq->lock);
31656519
PZ
1059 hrtimer_restart(&rq->hrtick_timer);
1060 rq->hrtick_csd_pending = 0;
05fa785c 1061 raw_spin_unlock(&rq->lock);
b328ca18
PZ
1062}
1063
31656519
PZ
1064/*
1065 * Called to set the hrtick timer state.
1066 *
1067 * called with rq->lock held and irqs disabled
1068 */
1069static void hrtick_start(struct rq *rq, u64 delay)
b328ca18 1070{
31656519
PZ
1071 struct hrtimer *timer = &rq->hrtick_timer;
1072 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
b328ca18 1073
cc584b21 1074 hrtimer_set_expires(timer, time);
31656519
PZ
1075
1076 if (rq == this_rq()) {
1077 hrtimer_restart(timer);
1078 } else if (!rq->hrtick_csd_pending) {
6e275637 1079 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
31656519
PZ
1080 rq->hrtick_csd_pending = 1;
1081 }
b328ca18
PZ
1082}
1083
1084static int
1085hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1086{
1087 int cpu = (int)(long)hcpu;
1088
1089 switch (action) {
1090 case CPU_UP_CANCELED:
1091 case CPU_UP_CANCELED_FROZEN:
1092 case CPU_DOWN_PREPARE:
1093 case CPU_DOWN_PREPARE_FROZEN:
1094 case CPU_DEAD:
1095 case CPU_DEAD_FROZEN:
31656519 1096 hrtick_clear(cpu_rq(cpu));
b328ca18
PZ
1097 return NOTIFY_OK;
1098 }
1099
1100 return NOTIFY_DONE;
1101}
1102
fa748203 1103static __init void init_hrtick(void)
b328ca18
PZ
1104{
1105 hotcpu_notifier(hotplug_hrtick, 0);
1106}
31656519
PZ
1107#else
1108/*
1109 * Called to set the hrtick timer state.
1110 *
1111 * called with rq->lock held and irqs disabled
1112 */
1113static void hrtick_start(struct rq *rq, u64 delay)
1114{
7f1e2ca9 1115 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
5c333864 1116 HRTIMER_MODE_REL_PINNED, 0);
31656519 1117}
b328ca18 1118
006c75f1 1119static inline void init_hrtick(void)
8f4d37ec 1120{
8f4d37ec 1121}
31656519 1122#endif /* CONFIG_SMP */
8f4d37ec 1123
31656519 1124static void init_rq_hrtick(struct rq *rq)
8f4d37ec 1125{
31656519
PZ
1126#ifdef CONFIG_SMP
1127 rq->hrtick_csd_pending = 0;
8f4d37ec 1128
31656519
PZ
1129 rq->hrtick_csd.flags = 0;
1130 rq->hrtick_csd.func = __hrtick_start;
1131 rq->hrtick_csd.info = rq;
1132#endif
8f4d37ec 1133
31656519
PZ
1134 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1135 rq->hrtick_timer.function = hrtick;
8f4d37ec 1136}
006c75f1 1137#else /* CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
1138static inline void hrtick_clear(struct rq *rq)
1139{
1140}
1141
8f4d37ec
PZ
1142static inline void init_rq_hrtick(struct rq *rq)
1143{
1144}
1145
b328ca18
PZ
1146static inline void init_hrtick(void)
1147{
1148}
006c75f1 1149#endif /* CONFIG_SCHED_HRTICK */
8f4d37ec 1150
c24d20db
IM
1151/*
1152 * resched_task - mark a task 'to be rescheduled now'.
1153 *
1154 * On UP this means the setting of the need_resched flag, on SMP it
1155 * might also involve a cross-CPU call to trigger the scheduler on
1156 * the target CPU.
1157 */
1158#ifdef CONFIG_SMP
1159
1160#ifndef tsk_is_polling
1161#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1162#endif
1163
31656519 1164static void resched_task(struct task_struct *p)
c24d20db
IM
1165{
1166 int cpu;
1167
05fa785c 1168 assert_raw_spin_locked(&task_rq(p)->lock);
c24d20db 1169
5ed0cec0 1170 if (test_tsk_need_resched(p))
c24d20db
IM
1171 return;
1172
5ed0cec0 1173 set_tsk_need_resched(p);
c24d20db
IM
1174
1175 cpu = task_cpu(p);
1176 if (cpu == smp_processor_id())
1177 return;
1178
1179 /* NEED_RESCHED must be visible before we test polling */
1180 smp_mb();
1181 if (!tsk_is_polling(p))
1182 smp_send_reschedule(cpu);
1183}
1184
1185static void resched_cpu(int cpu)
1186{
1187 struct rq *rq = cpu_rq(cpu);
1188 unsigned long flags;
1189
05fa785c 1190 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
c24d20db
IM
1191 return;
1192 resched_task(cpu_curr(cpu));
05fa785c 1193 raw_spin_unlock_irqrestore(&rq->lock, flags);
c24d20db 1194}
06d8308c
TG
1195
1196#ifdef CONFIG_NO_HZ
83cd4fe2
VP
1197/*
1198 * In the semi idle case, use the nearest busy cpu for migrating timers
1199 * from an idle cpu. This is good for power-savings.
1200 *
1201 * We don't do similar optimization for completely idle system, as
1202 * selecting an idle cpu will add more delays to the timers than intended
1203 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
1204 */
1205int get_nohz_timer_target(void)
1206{
1207 int cpu = smp_processor_id();
1208 int i;
1209 struct sched_domain *sd;
1210
1211 for_each_domain(cpu, sd) {
1212 for_each_cpu(i, sched_domain_span(sd))
1213 if (!idle_cpu(i))
1214 return i;
1215 }
1216 return cpu;
1217}
06d8308c
TG
1218/*
1219 * When add_timer_on() enqueues a timer into the timer wheel of an
1220 * idle CPU then this timer might expire before the next timer event
1221 * which is scheduled to wake up that CPU. In case of a completely
1222 * idle system the next event might even be infinite time into the
1223 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
1224 * leaves the inner idle loop so the newly added timer is taken into
1225 * account when the CPU goes back to idle and evaluates the timer
1226 * wheel for the next timer event.
1227 */
1228void wake_up_idle_cpu(int cpu)
1229{
1230 struct rq *rq = cpu_rq(cpu);
1231
1232 if (cpu == smp_processor_id())
1233 return;
1234
1235 /*
1236 * This is safe, as this function is called with the timer
1237 * wheel base lock of (cpu) held. When the CPU is on the way
1238 * to idle and has not yet set rq->curr to idle then it will
1239 * be serialized on the timer wheel base lock and take the new
1240 * timer into account automatically.
1241 */
1242 if (rq->curr != rq->idle)
1243 return;
1244
1245 /*
1246 * We can set TIF_RESCHED on the idle task of the other CPU
1247 * lockless. The worst case is that the other CPU runs the
1248 * idle task through an additional NOOP schedule()
1249 */
5ed0cec0 1250 set_tsk_need_resched(rq->idle);
06d8308c
TG
1251
1252 /* NEED_RESCHED must be visible before we test polling */
1253 smp_mb();
1254 if (!tsk_is_polling(rq->idle))
1255 smp_send_reschedule(cpu);
1256}
39c0cbe2 1257
6d6bc0ad 1258#endif /* CONFIG_NO_HZ */
06d8308c 1259
e9e9250b
PZ
1260static u64 sched_avg_period(void)
1261{
1262 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1263}
1264
1265static void sched_avg_update(struct rq *rq)
1266{
1267 s64 period = sched_avg_period();
1268
1269 while ((s64)(rq->clock - rq->age_stamp) > period) {
0d98bb26
WD
1270 /*
1271 * Inline assembly required to prevent the compiler
1272 * optimising this loop into a divmod call.
1273 * See __iter_div_u64_rem() for another example of this.
1274 */
1275 asm("" : "+rm" (rq->age_stamp));
e9e9250b
PZ
1276 rq->age_stamp += period;
1277 rq->rt_avg /= 2;
1278 }
1279}
1280
1281static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1282{
1283 rq->rt_avg += rt_delta;
1284 sched_avg_update(rq);
1285}
1286
6d6bc0ad 1287#else /* !CONFIG_SMP */
31656519 1288static void resched_task(struct task_struct *p)
c24d20db 1289{
05fa785c 1290 assert_raw_spin_locked(&task_rq(p)->lock);
31656519 1291 set_tsk_need_resched(p);
c24d20db 1292}
e9e9250b
PZ
1293
1294static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1295{
1296}
da2b71ed
SS
1297
1298static void sched_avg_update(struct rq *rq)
1299{
1300}
6d6bc0ad 1301#endif /* CONFIG_SMP */
c24d20db 1302
45bf76df
IM
1303#if BITS_PER_LONG == 32
1304# define WMULT_CONST (~0UL)
1305#else
1306# define WMULT_CONST (1UL << 32)
1307#endif
1308
1309#define WMULT_SHIFT 32
1310
194081eb
IM
1311/*
1312 * Shift right and round:
1313 */
cf2ab469 1314#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
194081eb 1315
a7be37ac
PZ
1316/*
1317 * delta *= weight / lw
1318 */
cb1c4fc9 1319static unsigned long
45bf76df
IM
1320calc_delta_mine(unsigned long delta_exec, unsigned long weight,
1321 struct load_weight *lw)
1322{
1323 u64 tmp;
1324
7a232e03
LJ
1325 if (!lw->inv_weight) {
1326 if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
1327 lw->inv_weight = 1;
1328 else
1329 lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
1330 / (lw->weight+1);
1331 }
45bf76df
IM
1332
1333 tmp = (u64)delta_exec * weight;
1334 /*
1335 * Check whether we'd overflow the 64-bit multiplication:
1336 */
194081eb 1337 if (unlikely(tmp > WMULT_CONST))
cf2ab469 1338 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
194081eb
IM
1339 WMULT_SHIFT/2);
1340 else
cf2ab469 1341 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
45bf76df 1342
ecf691da 1343 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
45bf76df
IM
1344}
1345
1091985b 1346static inline void update_load_add(struct load_weight *lw, unsigned long inc)
45bf76df
IM
1347{
1348 lw->weight += inc;
e89996ae 1349 lw->inv_weight = 0;
45bf76df
IM
1350}
1351
1091985b 1352static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
45bf76df
IM
1353{
1354 lw->weight -= dec;
e89996ae 1355 lw->inv_weight = 0;
45bf76df
IM
1356}
1357
2dd73a4f
PW
1358/*
1359 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1360 * of tasks with abnormal "nice" values across CPUs the contribution that
1361 * each task makes to its run queue's load is weighted according to its
41a2d6cf 1362 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
2dd73a4f
PW
1363 * scaled version of the new time slice allocation that they receive on time
1364 * slice expiry etc.
1365 */
1366
cce7ade8
PZ
1367#define WEIGHT_IDLEPRIO 3
1368#define WMULT_IDLEPRIO 1431655765
dd41f596
IM
1369
1370/*
1371 * Nice levels are multiplicative, with a gentle 10% change for every
1372 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
1373 * nice 1, it will get ~10% less CPU time than another CPU-bound task
1374 * that remained on nice 0.
1375 *
1376 * The "10% effect" is relative and cumulative: from _any_ nice level,
1377 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
f9153ee6
IM
1378 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
1379 * If a task goes up by ~10% and another task goes down by ~10% then
1380 * the relative distance between them is ~25%.)
dd41f596
IM
1381 */
1382static const int prio_to_weight[40] = {
254753dc
IM
1383 /* -20 */ 88761, 71755, 56483, 46273, 36291,
1384 /* -15 */ 29154, 23254, 18705, 14949, 11916,
1385 /* -10 */ 9548, 7620, 6100, 4904, 3906,
1386 /* -5 */ 3121, 2501, 1991, 1586, 1277,
1387 /* 0 */ 1024, 820, 655, 526, 423,
1388 /* 5 */ 335, 272, 215, 172, 137,
1389 /* 10 */ 110, 87, 70, 56, 45,
1390 /* 15 */ 36, 29, 23, 18, 15,
dd41f596
IM
1391};
1392
5714d2de
IM
1393/*
1394 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
1395 *
1396 * In cases where the weight does not change often, we can use the
1397 * precalculated inverse to speed up arithmetics by turning divisions
1398 * into multiplications:
1399 */
dd41f596 1400static const u32 prio_to_wmult[40] = {
254753dc
IM
1401 /* -20 */ 48388, 59856, 76040, 92818, 118348,
1402 /* -15 */ 147320, 184698, 229616, 287308, 360437,
1403 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
1404 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
1405 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
1406 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
1407 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
1408 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
dd41f596 1409};
2dd73a4f 1410
ef12fefa
BR
1411/* Time spent by the tasks of the cpu accounting group executing in ... */
1412enum cpuacct_stat_index {
1413 CPUACCT_STAT_USER, /* ... user mode */
1414 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
1415
1416 CPUACCT_STAT_NSTATS,
1417};
1418
d842de87
SV
1419#ifdef CONFIG_CGROUP_CPUACCT
1420static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
ef12fefa
BR
1421static void cpuacct_update_stats(struct task_struct *tsk,
1422 enum cpuacct_stat_index idx, cputime_t val);
d842de87
SV
1423#else
1424static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
ef12fefa
BR
1425static inline void cpuacct_update_stats(struct task_struct *tsk,
1426 enum cpuacct_stat_index idx, cputime_t val) {}
d842de87
SV
1427#endif
1428
18d95a28
PZ
1429static inline void inc_cpu_load(struct rq *rq, unsigned long load)
1430{
1431 update_load_add(&rq->load, load);
1432}
1433
1434static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1435{
1436 update_load_sub(&rq->load, load);
1437}
1438
7940ca36 1439#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
eb755805 1440typedef int (*tg_visitor)(struct task_group *, void *);
c09595f6
PZ
1441
1442/*
1443 * Iterate the full tree, calling @down when first entering a node and @up when
1444 * leaving it for the final time.
1445 */
eb755805 1446static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
c09595f6
PZ
1447{
1448 struct task_group *parent, *child;
eb755805 1449 int ret;
c09595f6
PZ
1450
1451 rcu_read_lock();
1452 parent = &root_task_group;
1453down:
eb755805
PZ
1454 ret = (*down)(parent, data);
1455 if (ret)
1456 goto out_unlock;
c09595f6
PZ
1457 list_for_each_entry_rcu(child, &parent->children, siblings) {
1458 parent = child;
1459 goto down;
1460
1461up:
1462 continue;
1463 }
eb755805
PZ
1464 ret = (*up)(parent, data);
1465 if (ret)
1466 goto out_unlock;
c09595f6
PZ
1467
1468 child = parent;
1469 parent = parent->parent;
1470 if (parent)
1471 goto up;
eb755805 1472out_unlock:
c09595f6 1473 rcu_read_unlock();
eb755805
PZ
1474
1475 return ret;
c09595f6
PZ
1476}
1477
eb755805
PZ
1478static int tg_nop(struct task_group *tg, void *data)
1479{
1480 return 0;
c09595f6 1481}
eb755805
PZ
1482#endif
1483
1484#ifdef CONFIG_SMP
f5f08f39
PZ
1485/* Used instead of source_load when we know the type == 0 */
1486static unsigned long weighted_cpuload(const int cpu)
1487{
1488 return cpu_rq(cpu)->load.weight;
1489}
1490
1491/*
1492 * Return a low guess at the load of a migration-source cpu weighted
1493 * according to the scheduling class and "nice" value.
1494 *
1495 * We want to under-estimate the load of migration sources, to
1496 * balance conservatively.
1497 */
1498static unsigned long source_load(int cpu, int type)
1499{
1500 struct rq *rq = cpu_rq(cpu);
1501 unsigned long total = weighted_cpuload(cpu);
1502
1503 if (type == 0 || !sched_feat(LB_BIAS))
1504 return total;
1505
1506 return min(rq->cpu_load[type-1], total);
1507}
1508
1509/*
1510 * Return a high guess at the load of a migration-target cpu weighted
1511 * according to the scheduling class and "nice" value.
1512 */
1513static unsigned long target_load(int cpu, int type)
1514{
1515 struct rq *rq = cpu_rq(cpu);
1516 unsigned long total = weighted_cpuload(cpu);
1517
1518 if (type == 0 || !sched_feat(LB_BIAS))
1519 return total;
1520
1521 return max(rq->cpu_load[type-1], total);
1522}
1523
ae154be1
PZ
1524static unsigned long power_of(int cpu)
1525{
e51fd5e2 1526 return cpu_rq(cpu)->cpu_power;
ae154be1
PZ
1527}
1528
eb755805
PZ
1529static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1530
1531static unsigned long cpu_avg_load_per_task(int cpu)
1532{
1533 struct rq *rq = cpu_rq(cpu);
af6d596f 1534 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
eb755805 1535
4cd42620
SR
1536 if (nr_running)
1537 rq->avg_load_per_task = rq->load.weight / nr_running;
a2d47777
BS
1538 else
1539 rq->avg_load_per_task = 0;
eb755805
PZ
1540
1541 return rq->avg_load_per_task;
1542}
1543
1544#ifdef CONFIG_FAIR_GROUP_SCHED
c09595f6 1545
43cf38eb 1546static __read_mostly unsigned long __percpu *update_shares_data;
34d76c41 1547
c09595f6
PZ
1548static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1549
1550/*
1551 * Calculate and set the cpu's group shares.
1552 */
34d76c41
PZ
1553static void update_group_shares_cpu(struct task_group *tg, int cpu,
1554 unsigned long sd_shares,
1555 unsigned long sd_rq_weight,
4a6cc4bd 1556 unsigned long *usd_rq_weight)
18d95a28 1557{
34d76c41 1558 unsigned long shares, rq_weight;
a5004278 1559 int boost = 0;
c09595f6 1560
4a6cc4bd 1561 rq_weight = usd_rq_weight[cpu];
a5004278
PZ
1562 if (!rq_weight) {
1563 boost = 1;
1564 rq_weight = NICE_0_LOAD;
1565 }
c8cba857 1566
c09595f6 1567 /*
a8af7246
PZ
1568 * \Sum_j shares_j * rq_weight_i
1569 * shares_i = -----------------------------
1570 * \Sum_j rq_weight_j
c09595f6 1571 */
ec4e0e2f 1572 shares = (sd_shares * rq_weight) / sd_rq_weight;
ffda12a1 1573 shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
c09595f6 1574
ffda12a1
PZ
1575 if (abs(shares - tg->se[cpu]->load.weight) >
1576 sysctl_sched_shares_thresh) {
1577 struct rq *rq = cpu_rq(cpu);
1578 unsigned long flags;
c09595f6 1579
05fa785c 1580 raw_spin_lock_irqsave(&rq->lock, flags);
34d76c41 1581 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
a5004278 1582 tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
ffda12a1 1583 __set_se_shares(tg->se[cpu], shares);
05fa785c 1584 raw_spin_unlock_irqrestore(&rq->lock, flags);
ffda12a1 1585 }
18d95a28 1586}
c09595f6
PZ
1587
1588/*
c8cba857
PZ
1589 * Re-compute the task group their per cpu shares over the given domain.
1590 * This needs to be done in a bottom-up fashion because the rq weight of a
1591 * parent group depends on the shares of its child groups.
c09595f6 1592 */
eb755805 1593static int tg_shares_up(struct task_group *tg, void *data)
c09595f6 1594{
cd8ad40d 1595 unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
4a6cc4bd 1596 unsigned long *usd_rq_weight;
eb755805 1597 struct sched_domain *sd = data;
34d76c41 1598 unsigned long flags;
c8cba857 1599 int i;
c09595f6 1600
34d76c41
PZ
1601 if (!tg->se[0])
1602 return 0;
1603
1604 local_irq_save(flags);
4a6cc4bd 1605 usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
34d76c41 1606
758b2cdc 1607 for_each_cpu(i, sched_domain_span(sd)) {
34d76c41 1608 weight = tg->cfs_rq[i]->load.weight;
4a6cc4bd 1609 usd_rq_weight[i] = weight;
34d76c41 1610
cd8ad40d 1611 rq_weight += weight;
ec4e0e2f
KC
1612 /*
1613 * If there are currently no tasks on the cpu pretend there
1614 * is one of average load so that when a new task gets to
1615 * run here it will not get delayed by group starvation.
1616 */
ec4e0e2f
KC
1617 if (!weight)
1618 weight = NICE_0_LOAD;
1619
cd8ad40d 1620 sum_weight += weight;
c8cba857 1621 shares += tg->cfs_rq[i]->shares;
c09595f6 1622 }
c09595f6 1623
cd8ad40d
PZ
1624 if (!rq_weight)
1625 rq_weight = sum_weight;
1626
c8cba857
PZ
1627 if ((!shares && rq_weight) || shares > tg->shares)
1628 shares = tg->shares;
1629
1630 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
1631 shares = tg->shares;
c09595f6 1632
758b2cdc 1633 for_each_cpu(i, sched_domain_span(sd))
4a6cc4bd 1634 update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
34d76c41
PZ
1635
1636 local_irq_restore(flags);
eb755805
PZ
1637
1638 return 0;
c09595f6
PZ
1639}
1640
1641/*
c8cba857
PZ
1642 * Compute the cpu's hierarchical load factor for each task group.
1643 * This needs to be done in a top-down fashion because the load of a child
1644 * group is a fraction of its parents load.
c09595f6 1645 */
eb755805 1646static int tg_load_down(struct task_group *tg, void *data)
c09595f6 1647{
c8cba857 1648 unsigned long load;
eb755805 1649 long cpu = (long)data;
c09595f6 1650
c8cba857
PZ
1651 if (!tg->parent) {
1652 load = cpu_rq(cpu)->load.weight;
1653 } else {
1654 load = tg->parent->cfs_rq[cpu]->h_load;
1655 load *= tg->cfs_rq[cpu]->shares;
1656 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
1657 }
c09595f6 1658
c8cba857 1659 tg->cfs_rq[cpu]->h_load = load;
c09595f6 1660
eb755805 1661 return 0;
c09595f6
PZ
1662}
1663
c8cba857 1664static void update_shares(struct sched_domain *sd)
4d8d595d 1665{
e7097159
PZ
1666 s64 elapsed;
1667 u64 now;
1668
1669 if (root_task_group_empty())
1670 return;
1671
c676329a 1672 now = local_clock();
e7097159 1673 elapsed = now - sd->last_update;
2398f2c6
PZ
1674
1675 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
1676 sd->last_update = now;
eb755805 1677 walk_tg_tree(tg_nop, tg_shares_up, sd);
2398f2c6 1678 }
4d8d595d
PZ
1679}
1680
eb755805 1681static void update_h_load(long cpu)
c09595f6 1682{
eb755805 1683 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
c09595f6
PZ
1684}
1685
c09595f6
PZ
1686#else
1687
c8cba857 1688static inline void update_shares(struct sched_domain *sd)
4d8d595d
PZ
1689{
1690}
1691
18d95a28
PZ
1692#endif
1693
8f45e2b5
GH
1694#ifdef CONFIG_PREEMPT
1695
b78bb868
PZ
1696static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1697
70574a99 1698/*
8f45e2b5
GH
1699 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1700 * way at the expense of forcing extra atomic operations in all
1701 * invocations. This assures that the double_lock is acquired using the
1702 * same underlying policy as the spinlock_t on this architecture, which
1703 * reduces latency compared to the unfair variant below. However, it
1704 * also adds more overhead and therefore may reduce throughput.
70574a99 1705 */
8f45e2b5
GH
1706static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1707 __releases(this_rq->lock)
1708 __acquires(busiest->lock)
1709 __acquires(this_rq->lock)
1710{
05fa785c 1711 raw_spin_unlock(&this_rq->lock);
8f45e2b5
GH
1712 double_rq_lock(this_rq, busiest);
1713
1714 return 1;
1715}
1716
1717#else
1718/*
1719 * Unfair double_lock_balance: Optimizes throughput at the expense of
1720 * latency by eliminating extra atomic operations when the locks are
1721 * already in proper order on entry. This favors lower cpu-ids and will
1722 * grant the double lock to lower cpus over higher ids under contention,
1723 * regardless of entry order into the function.
1724 */
1725static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
70574a99
AD
1726 __releases(this_rq->lock)
1727 __acquires(busiest->lock)
1728 __acquires(this_rq->lock)
1729{
1730 int ret = 0;
1731
05fa785c 1732 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
70574a99 1733 if (busiest < this_rq) {
05fa785c
TG
1734 raw_spin_unlock(&this_rq->lock);
1735 raw_spin_lock(&busiest->lock);
1736 raw_spin_lock_nested(&this_rq->lock,
1737 SINGLE_DEPTH_NESTING);
70574a99
AD
1738 ret = 1;
1739 } else
05fa785c
TG
1740 raw_spin_lock_nested(&busiest->lock,
1741 SINGLE_DEPTH_NESTING);
70574a99
AD
1742 }
1743 return ret;
1744}
1745
8f45e2b5
GH
1746#endif /* CONFIG_PREEMPT */
1747
1748/*
1749 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1750 */
1751static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1752{
1753 if (unlikely(!irqs_disabled())) {
1754 /* printk() doesn't work good under rq->lock */
05fa785c 1755 raw_spin_unlock(&this_rq->lock);
8f45e2b5
GH
1756 BUG_ON(1);
1757 }
1758
1759 return _double_lock_balance(this_rq, busiest);
1760}
1761
70574a99
AD
1762static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1763 __releases(busiest->lock)
1764{
05fa785c 1765 raw_spin_unlock(&busiest->lock);
70574a99
AD
1766 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1767}
1e3c88bd
PZ
1768
1769/*
1770 * double_rq_lock - safely lock two runqueues
1771 *
1772 * Note this does not disable interrupts like task_rq_lock,
1773 * you need to do so manually before calling.
1774 */
1775static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1776 __acquires(rq1->lock)
1777 __acquires(rq2->lock)
1778{
1779 BUG_ON(!irqs_disabled());
1780 if (rq1 == rq2) {
1781 raw_spin_lock(&rq1->lock);
1782 __acquire(rq2->lock); /* Fake it out ;) */
1783 } else {
1784 if (rq1 < rq2) {
1785 raw_spin_lock(&rq1->lock);
1786 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1787 } else {
1788 raw_spin_lock(&rq2->lock);
1789 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1790 }
1791 }
1e3c88bd
PZ
1792}
1793
1794/*
1795 * double_rq_unlock - safely unlock two runqueues
1796 *
1797 * Note this does not restore interrupts like task_rq_unlock,
1798 * you need to do so manually after calling.
1799 */
1800static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1801 __releases(rq1->lock)
1802 __releases(rq2->lock)
1803{
1804 raw_spin_unlock(&rq1->lock);
1805 if (rq1 != rq2)
1806 raw_spin_unlock(&rq2->lock);
1807 else
1808 __release(rq2->lock);
1809}
1810
18d95a28
PZ
1811#endif
1812
30432094 1813#ifdef CONFIG_FAIR_GROUP_SCHED
34e83e85
IM
1814static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1815{
30432094 1816#ifdef CONFIG_SMP
34e83e85
IM
1817 cfs_rq->shares = shares;
1818#endif
1819}
30432094 1820#endif
e7693a36 1821
74f5187a 1822static void calc_load_account_idle(struct rq *this_rq);
0bcdcf28 1823static void update_sysctl(void);
acb4a848 1824static int get_update_sysctl_factor(void);
fdf3e95d 1825static void update_cpu_load(struct rq *this_rq);
dce48a84 1826
cd29fe6f
PZ
1827static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1828{
1829 set_task_rq(p, cpu);
1830#ifdef CONFIG_SMP
1831 /*
1832 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
2033 * successfully executed on another CPU. We must ensure that updates of
1834 * per-task data have been completed by this moment.
1835 */
1836 smp_wmb();
1837 task_thread_info(p)->cpu = cpu;
1838#endif
1839}
dce48a84 1840
1e3c88bd 1841static const struct sched_class rt_sched_class;
dd41f596
IM
1842
1843#define sched_class_highest (&rt_sched_class)
1f11eb6a
GH
1844#define for_each_class(class) \
1845 for (class = sched_class_highest; class; class = class->next)
dd41f596 1846
1e3c88bd
PZ
1847#include "sched_stats.h"
1848
c09595f6 1849static void inc_nr_running(struct rq *rq)
9c217245
IM
1850{
1851 rq->nr_running++;
9c217245
IM
1852}
1853
c09595f6 1854static void dec_nr_running(struct rq *rq)
9c217245
IM
1855{
1856 rq->nr_running--;
9c217245
IM
1857}
1858
45bf76df
IM
1859static void set_load_weight(struct task_struct *p)
1860{
1861 if (task_has_rt_policy(p)) {
e51fd5e2
PZ
1862 p->se.load.weight = 0;
1863 p->se.load.inv_weight = WMULT_CONST;
dd41f596
IM
1864 return;
1865 }
45bf76df 1866
dd41f596
IM
1867 /*
1868 * SCHED_IDLE tasks get minimal weight:
1869 */
1870 if (p->policy == SCHED_IDLE) {
1871 p->se.load.weight = WEIGHT_IDLEPRIO;
1872 p->se.load.inv_weight = WMULT_IDLEPRIO;
1873 return;
1874 }
71f8bd46 1875
dd41f596
IM
1876 p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1877 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
71f8bd46
IM
1878}
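/*
 * Illustrative note (not part of this file): with the standard
 * prio_to_weight[] table a nice-0 task ends up with load.weight == 1024
 * (NICE_0_LOAD), nice -20 with 88761 and nice +19 with 15, while
 * SCHED_IDLE tasks get the tiny WEIGHT_IDLEPRIO (3) assigned above.
 */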
1879
371fd7e7 1880static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 1881{
a64692a3 1882 update_rq_clock(rq);
dd41f596 1883 sched_info_queued(p);
371fd7e7 1884 p->sched_class->enqueue_task(rq, p, flags);
dd41f596 1885 p->se.on_rq = 1;
71f8bd46
IM
1886}
1887
371fd7e7 1888static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 1889{
a64692a3 1890 update_rq_clock(rq);
46ac22ba 1891 sched_info_dequeued(p);
371fd7e7 1892 p->sched_class->dequeue_task(rq, p, flags);
dd41f596 1893 p->se.on_rq = 0;
71f8bd46
IM
1894}
1895
1e3c88bd
PZ
1896/*
1897 * activate_task - move a task to the runqueue.
1898 */
371fd7e7 1899static void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
1900{
1901 if (task_contributes_to_load(p))
1902 rq->nr_uninterruptible--;
1903
371fd7e7 1904 enqueue_task(rq, p, flags);
1e3c88bd
PZ
1905 inc_nr_running(rq);
1906}
1907
1908/*
1909 * deactivate_task - remove a task from the runqueue.
1910 */
371fd7e7 1911static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
1912{
1913 if (task_contributes_to_load(p))
1914 rq->nr_uninterruptible++;
1915
371fd7e7 1916 dequeue_task(rq, p, flags);
1e3c88bd
PZ
1917 dec_nr_running(rq);
1918}
1919
1920#include "sched_idletask.c"
1921#include "sched_fair.c"
1922#include "sched_rt.c"
1923#ifdef CONFIG_SCHED_DEBUG
1924# include "sched_debug.c"
1925#endif
1926
14531189 1927/*
dd41f596 1928 * __normal_prio - return the priority that is based on the static prio
14531189 1929 */
14531189
IM
1930static inline int __normal_prio(struct task_struct *p)
1931{
dd41f596 1932 return p->static_prio;
14531189
IM
1933}
1934
b29739f9
IM
1935/*
1936 * Calculate the expected normal priority: i.e. priority
1937 * without taking RT-inheritance into account. Might be
1938 * boosted by interactivity modifiers. Changes upon fork,
1939 * setprio syscalls, and whenever the interactivity
1940 * estimator recalculates.
1941 */
36c8b586 1942static inline int normal_prio(struct task_struct *p)
b29739f9
IM
1943{
1944 int prio;
1945
e05606d3 1946 if (task_has_rt_policy(p))
b29739f9
IM
1947 prio = MAX_RT_PRIO-1 - p->rt_priority;
1948 else
1949 prio = __normal_prio(p);
1950 return prio;
1951}
1952
1953/*
1954 * Calculate the current priority, i.e. the priority
1955 * taken into account by the scheduler. This value might
1956 * be boosted by RT tasks, or might be boosted by
1957 * interactivity modifiers. Will be RT if the task got
1958 * RT-boosted. If not then it returns p->normal_prio.
1959 */
36c8b586 1960static int effective_prio(struct task_struct *p)
b29739f9
IM
1961{
1962 p->normal_prio = normal_prio(p);
1963 /*
1964 * If we are RT tasks or we were boosted to RT priority,
1965 * keep the priority unchanged. Otherwise, update priority
1966 * to the normal priority:
1967 */
1968 if (!rt_prio(p->prio))
1969 return p->normal_prio;
1970 return p->prio;
1971}
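/*
 * Illustrative note (not part of this file): with the usual MAX_RT_PRIO of
 * 100, a SCHED_FIFO task with rt_priority 50 gets normal_prio() ==
 * MAX_RT_PRIO-1 - 50 == 49, while a SCHED_NORMAL task at nice 0 simply
 * keeps its static priority (NICE_TO_PRIO(0) == 120).  effective_prio()
 * only deviates from this when the task has been PI-boosted into the RT
 * range.
 */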
1972
1da177e4
LT
1973/**
1974 * task_curr - is this task currently executing on a CPU?
1975 * @p: the task in question.
1976 */
36c8b586 1977inline int task_curr(const struct task_struct *p)
1da177e4
LT
1978{
1979 return cpu_curr(task_cpu(p)) == p;
1980}
1981
cb469845
SR
1982static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1983 const struct sched_class *prev_class,
1984 int oldprio, int running)
1985{
1986 if (prev_class != p->sched_class) {
1987 if (prev_class->switched_from)
1988 prev_class->switched_from(rq, p, running);
1989 p->sched_class->switched_to(rq, p, running);
1990 } else
1991 p->sched_class->prio_changed(rq, p, oldprio, running);
1992}
1993
1da177e4 1994#ifdef CONFIG_SMP
cc367732
IM
1995/*
1996 * Is this task likely cache-hot:
1997 */
e7693a36 1998static int
cc367732
IM
1999task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2000{
2001 s64 delta;
2002
e6c8fba7
PZ
2003 if (p->sched_class != &fair_sched_class)
2004 return 0;
2005
f540a608
IM
2006 /*
2007 * Buddy candidates are cache hot:
2008 */
f685ceac 2009 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4793241b
PZ
2010 (&p->se == cfs_rq_of(&p->se)->next ||
2011 &p->se == cfs_rq_of(&p->se)->last))
f540a608
IM
2012 return 1;
2013
6bc1665b
IM
2014 if (sysctl_sched_migration_cost == -1)
2015 return 1;
2016 if (sysctl_sched_migration_cost == 0)
2017 return 0;
2018
cc367732
IM
2019 delta = now - p->se.exec_start;
2020
2021 return delta < (s64)sysctl_sched_migration_cost;
2022}
2023
dd41f596 2024void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 2025{
e2912009
PZ
2026#ifdef CONFIG_SCHED_DEBUG
2027 /*
2028 * We should never call set_task_cpu() on a blocked task,
2029 * ttwu() will sort out the placement.
2030 */
077614ee
PZ
2031 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2032 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
e2912009
PZ
2033#endif
2034
de1d7286 2035 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 2036
0c69774e
PZ
2037 if (task_cpu(p) != new_cpu) {
2038 p->se.nr_migrations++;
2039 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2040 }
dd41f596
IM
2041
2042 __set_task_cpu(p, new_cpu);
c65cc870
IM
2043}
2044
969c7921 2045struct migration_arg {
36c8b586 2046 struct task_struct *task;
1da177e4 2047 int dest_cpu;
70b97a7f 2048};
1da177e4 2049
969c7921
TH
2050static int migration_cpu_stop(void *data);
2051
1da177e4
LT
2052/*
2053 * The task's runqueue lock must be held.
2054 * Returns true if you have to wait for the migration thread.
2055 */
969c7921 2056static bool migrate_task(struct task_struct *p, int dest_cpu)
1da177e4 2057{
70b97a7f 2058 struct rq *rq = task_rq(p);
1da177e4
LT
2059
2060 /*
2061 * If the task is not on a runqueue (and not running), then
e2912009 2062 * the next wake-up will properly place the task.
1da177e4 2063 */
969c7921 2064 return p->se.on_rq || task_running(rq, p);
1da177e4
LT
2065}
2066
2067/*
2068 * wait_task_inactive - wait for a thread to unschedule.
2069 *
85ba2d86
RM
2070 * If @match_state is nonzero, it's the @p->state value just checked and
2071 * not expected to change. If it changes, i.e. @p might have woken up,
2072 * then return zero. When we succeed in waiting for @p to be off its CPU,
2073 * we return a positive number (its total switch count). If a second call
2074 * a short while later returns the same number, the caller can be sure that
2075 * @p has remained unscheduled the whole time.
2076 *
1da177e4
LT
2077 * The caller must ensure that the task *will* unschedule sometime soon,
2078 * else this function might spin for a *long* time. This function can't
2079 * be called with interrupts off, or it may introduce deadlock with
2080 * smp_call_function() if an IPI is sent by the same process we are
2081 * waiting to become inactive.
2082 */
85ba2d86 2083unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4
LT
2084{
2085 unsigned long flags;
dd41f596 2086 int running, on_rq;
85ba2d86 2087 unsigned long ncsw;
70b97a7f 2088 struct rq *rq;
1da177e4 2089
3a5c359a
AK
2090 for (;;) {
2091 /*
2092 * We do the initial early heuristics without holding
2093 * any task-queue locks at all. We'll only try to get
2094 * the runqueue lock when things look like they will
2095 * work out!
2096 */
2097 rq = task_rq(p);
fa490cfd 2098
3a5c359a
AK
2099 /*
2100 * If the task is actively running on another CPU
2101 * still, just relax and busy-wait without holding
2102 * any locks.
2103 *
2104 * NOTE! Since we don't hold any locks, it's not
2105 * even sure that "rq" stays as the right runqueue!
2106 * But we don't care, since "task_running()" will
2107 * return false if the runqueue has changed and p
2108 * is actually now running somewhere else!
2109 */
85ba2d86
RM
2110 while (task_running(rq, p)) {
2111 if (match_state && unlikely(p->state != match_state))
2112 return 0;
3a5c359a 2113 cpu_relax();
85ba2d86 2114 }
fa490cfd 2115
3a5c359a
AK
2116 /*
2117 * Ok, time to look more closely! We need the rq
2118 * lock now, to be *sure*. If we're wrong, we'll
2119 * just go back and repeat.
2120 */
2121 rq = task_rq_lock(p, &flags);
27a9da65 2122 trace_sched_wait_task(p);
3a5c359a
AK
2123 running = task_running(rq, p);
2124 on_rq = p->se.on_rq;
85ba2d86 2125 ncsw = 0;
f31e11d8 2126 if (!match_state || p->state == match_state)
93dcf55f 2127 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
3a5c359a 2128 task_rq_unlock(rq, &flags);
fa490cfd 2129
85ba2d86
RM
2130 /*
2131 * If it changed from the expected state, bail out now.
2132 */
2133 if (unlikely(!ncsw))
2134 break;
2135
3a5c359a
AK
2136 /*
2137 * Was it really running after all now that we
2138 * checked with the proper locks actually held?
2139 *
2140 * Oops. Go back and try again..
2141 */
2142 if (unlikely(running)) {
2143 cpu_relax();
2144 continue;
2145 }
fa490cfd 2146
3a5c359a
AK
2147 /*
2148 * It's not enough that it's not actively running,
2149 * it must be off the runqueue _entirely_, and not
2150 * preempted!
2151 *
80dd99b3 2152 * So if it was still runnable (but just not actively
3a5c359a
AK
2153 * running right now), it's preempted, and we should
2154 * yield - it could be a while.
2155 */
2156 if (unlikely(on_rq)) {
2157 schedule_timeout_uninterruptible(1);
2158 continue;
2159 }
fa490cfd 2160
3a5c359a
AK
2161 /*
2162 * Ahh, all good. It wasn't running, and it wasn't
2163 * runnable, which means that it will never become
2164 * running in the future either. We're all done!
2165 */
2166 break;
2167 }
85ba2d86
RM
2168
2169 return ncsw;
1da177e4
LT
2170}
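/*
 * Illustrative sketch (not part of this file): the switch count returned
 * above lets a caller verify that a task stayed off the CPU across two
 * observations.  'target' and the surrounding function are hypothetical.
 */
static bool example_stayed_unscheduled(struct task_struct *target)
{
	unsigned long ncsw;

	ncsw = wait_task_inactive(target, TASK_UNINTERRUPTIBLE);
	if (!ncsw)
		return false;	/* state changed, e.g. the task woke up */

	/* ... do work while @target is expected to remain blocked ... */

	return wait_task_inactive(target, TASK_UNINTERRUPTIBLE) == ncsw;
}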
2171
2172/***
2173 * kick_process - kick a running thread to enter/exit the kernel
2174 * @p: the to-be-kicked thread
2175 *
2176 * Cause a process which is running on another CPU to enter
2177 * kernel-mode, without any delay. (to get signals handled.)
2178 *
2179 * NOTE: this function doesn't have to take the runqueue lock,
2180 * because all it wants to ensure is that the remote task enters
2181 * the kernel. If the IPI races and the task has been migrated
2182 * to another CPU then no harm is done and the purpose has been
2183 * achieved as well.
2184 */
36c8b586 2185void kick_process(struct task_struct *p)
1da177e4
LT
2186{
2187 int cpu;
2188
2189 preempt_disable();
2190 cpu = task_cpu(p);
2191 if ((cpu != smp_processor_id()) && task_curr(p))
2192 smp_send_reschedule(cpu);
2193 preempt_enable();
2194}
b43e3521 2195EXPORT_SYMBOL_GPL(kick_process);
476d139c 2196#endif /* CONFIG_SMP */
1da177e4 2197
0793a61d
TG
2198/**
2199 * task_oncpu_function_call - call a function on the cpu on which a task runs
2200 * @p: the task to evaluate
2201 * @func: the function to be called
2202 * @info: the function call argument
2203 *
2204 * Calls the function @func when the task is currently running. This might
2205 * be on the current CPU, in which case the function is called directly.
2206 */
2207void task_oncpu_function_call(struct task_struct *p,
2208 void (*func) (void *info), void *info)
2209{
2210 int cpu;
2211
2212 preempt_disable();
2213 cpu = task_cpu(p);
2214 if (task_curr(p))
2215 smp_call_function_single(cpu, func, info, 1);
2216 preempt_enable();
2217}
2218
970b13ba 2219#ifdef CONFIG_SMP
30da688e
ON
2220/*
2221 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
2222 */
5da9a0fb
PZ
2223static int select_fallback_rq(int cpu, struct task_struct *p)
2224{
2225 int dest_cpu;
2226 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2227
2228 /* Look for allowed, online CPU in same node. */
2229 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2230 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2231 return dest_cpu;
2232
2233 /* Any allowed, online CPU? */
2234 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2235 if (dest_cpu < nr_cpu_ids)
2236 return dest_cpu;
2237
2238 /* No more Mr. Nice Guy. */
897f0b3c 2239 if (unlikely(dest_cpu >= nr_cpu_ids)) {
9084bb82 2240 dest_cpu = cpuset_cpus_allowed_fallback(p);
5da9a0fb
PZ
2241 /*
2242 * Don't tell them about moving exiting tasks or
2243 * kernel threads (both mm NULL), since they never
2244 * leave the kernel.
2245 */
2246 if (p->mm && printk_ratelimit()) {
2247 printk(KERN_INFO "process %d (%s) no "
2248 "longer affine to cpu%d\n",
2249 task_pid_nr(p), p->comm, cpu);
2250 }
2251 }
2252
2253 return dest_cpu;
2254}
2255
e2912009 2256/*
30da688e 2257 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
e2912009 2258 */
970b13ba 2259static inline
0017d735 2260int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
970b13ba 2261{
0017d735 2262 int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
e2912009
PZ
2263
2264 /*
2265 * In order not to call set_task_cpu() on a blocking task we need
2266 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2267 * cpu.
2268 *
2269 * Since this is common to all placement strategies, this lives here.
2270 *
2271 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2272 * not worry about this generic constraint ]
2273 */
2274 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
70f11205 2275 !cpu_online(cpu)))
5da9a0fb 2276 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
2277
2278 return cpu;
970b13ba 2279}
09a40af5
MG
2280
2281static void update_avg(u64 *avg, u64 sample)
2282{
2283 s64 diff = sample - *avg;
2284 *avg += diff >> 3;
2285}
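/*
 * Illustrative note (not part of this file): update_avg() is a simple
 * exponentially weighted average with a 1/8 gain.  E.g. with *avg == 800
 * and sample == 1600, diff is 800 and *avg becomes 800 + (800 >> 3) == 900.
 */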
970b13ba
PZ
2286#endif
2287
9ed3811a
TH
2288static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
2289 bool is_sync, bool is_migrate, bool is_local,
2290 unsigned long en_flags)
2291{
2292 schedstat_inc(p, se.statistics.nr_wakeups);
2293 if (is_sync)
2294 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2295 if (is_migrate)
2296 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2297 if (is_local)
2298 schedstat_inc(p, se.statistics.nr_wakeups_local);
2299 else
2300 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2301
2302 activate_task(rq, p, en_flags);
2303}
2304
2305static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
2306 int wake_flags, bool success)
2307{
2308 trace_sched_wakeup(p, success);
2309 check_preempt_curr(rq, p, wake_flags);
2310
2311 p->state = TASK_RUNNING;
2312#ifdef CONFIG_SMP
2313 if (p->sched_class->task_woken)
2314 p->sched_class->task_woken(rq, p);
2315
2316 if (unlikely(rq->idle_stamp)) {
2317 u64 delta = rq->clock - rq->idle_stamp;
2318 u64 max = 2*sysctl_sched_migration_cost;
2319
2320 if (delta > max)
2321 rq->avg_idle = max;
2322 else
2323 update_avg(&rq->avg_idle, delta);
2324 rq->idle_stamp = 0;
2325 }
2326#endif
21aa9af0
TH
2327 /* if a worker is waking up, notify workqueue */
2328 if ((p->flags & PF_WQ_WORKER) && success)
2329 wq_worker_waking_up(p, cpu_of(rq));
9ed3811a
TH
2330}
2331
2332/**
1da177e4 2333 * try_to_wake_up - wake up a thread
9ed3811a 2334 * @p: the thread to be awakened
1da177e4 2335 * @state: the mask of task states that can be woken
9ed3811a 2336 * @wake_flags: wake modifier flags (WF_*)
1da177e4
LT
2337 *
2338 * Put it on the run-queue if it's not already there. The "current"
2339 * thread is always on the run-queue (except when the actual
2340 * re-schedule is in progress), and as such you're allowed to do
2341 * the simpler "current->state = TASK_RUNNING" to mark yourself
2342 * runnable without the overhead of this.
2343 *
9ed3811a
TH
2344 * Returns %true if @p was woken up, %false if it was already running
2345 * or @state didn't match @p's state.
1da177e4 2346 */
7d478721
PZ
2347static int try_to_wake_up(struct task_struct *p, unsigned int state,
2348 int wake_flags)
1da177e4 2349{
cc367732 2350 int cpu, orig_cpu, this_cpu, success = 0;
1da177e4 2351 unsigned long flags;
371fd7e7 2352 unsigned long en_flags = ENQUEUE_WAKEUP;
ab3b3aa5 2353 struct rq *rq;
1da177e4 2354
e9c84311 2355 this_cpu = get_cpu();
2398f2c6 2356
04e2f174 2357 smp_wmb();
ab3b3aa5 2358 rq = task_rq_lock(p, &flags);
e9c84311 2359 if (!(p->state & state))
1da177e4
LT
2360 goto out;
2361
dd41f596 2362 if (p->se.on_rq)
1da177e4
LT
2363 goto out_running;
2364
2365 cpu = task_cpu(p);
cc367732 2366 orig_cpu = cpu;
1da177e4
LT
2367
2368#ifdef CONFIG_SMP
2369 if (unlikely(task_running(rq, p)))
2370 goto out_activate;
2371
e9c84311
PZ
2372 /*
2373 * In order to handle concurrent wakeups and release the rq->lock
2374 * we put the task in TASK_WAKING state.
eb24073b
IM
2375 *
2376 * First fix up the nr_uninterruptible count:
e9c84311 2377 */
cc87f76a
PZ
2378 if (task_contributes_to_load(p)) {
2379 if (likely(cpu_online(orig_cpu)))
2380 rq->nr_uninterruptible--;
2381 else
2382 this_rq()->nr_uninterruptible--;
2383 }
e9c84311 2384 p->state = TASK_WAKING;
efbbd05a 2385
371fd7e7 2386 if (p->sched_class->task_waking) {
efbbd05a 2387 p->sched_class->task_waking(rq, p);
371fd7e7
PZ
2388 en_flags |= ENQUEUE_WAKING;
2389 }
efbbd05a 2390
0017d735
PZ
2391 cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2392 if (cpu != orig_cpu)
5d2f5a61 2393 set_task_cpu(p, cpu);
0017d735 2394 __task_rq_unlock(rq);
ab19cb23 2395
0970d299
PZ
2396 rq = cpu_rq(cpu);
2397 raw_spin_lock(&rq->lock);
f5dc3753 2398
0970d299
PZ
2399 /*
2400 * We migrated the task without holding either rq->lock; however,
2401 * since the task is not on the task list itself, nobody else
2402 * will try to migrate the task, hence the rq should match the
2403 * cpu we just moved it to.
2404 */
2405 WARN_ON(task_cpu(p) != cpu);
e9c84311 2406 WARN_ON(p->state != TASK_WAKING);
1da177e4 2407
e7693a36
GH
2408#ifdef CONFIG_SCHEDSTATS
2409 schedstat_inc(rq, ttwu_count);
2410 if (cpu == this_cpu)
2411 schedstat_inc(rq, ttwu_local);
2412 else {
2413 struct sched_domain *sd;
2414 for_each_domain(this_cpu, sd) {
758b2cdc 2415 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
e7693a36
GH
2416 schedstat_inc(sd, ttwu_wake_remote);
2417 break;
2418 }
2419 }
2420 }
6d6bc0ad 2421#endif /* CONFIG_SCHEDSTATS */
e7693a36 2422
1da177e4
LT
2423out_activate:
2424#endif /* CONFIG_SMP */
9ed3811a
TH
2425 ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
2426 cpu == this_cpu, en_flags);
1da177e4 2427 success = 1;
1da177e4 2428out_running:
9ed3811a 2429 ttwu_post_activation(p, rq, wake_flags, success);
1da177e4
LT
2430out:
2431 task_rq_unlock(rq, &flags);
e9c84311 2432 put_cpu();
1da177e4
LT
2433
2434 return success;
2435}
2436
21aa9af0
TH
2437/**
2438 * try_to_wake_up_local - try to wake up a local task with rq lock held
2439 * @p: the thread to be awakened
2440 *
2441 * Put @p on the run-queue if it's not already there. The caller must
2442 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2443 * the current task. this_rq() stays locked over invocation.
2444 */
2445static void try_to_wake_up_local(struct task_struct *p)
2446{
2447 struct rq *rq = task_rq(p);
2448 bool success = false;
2449
2450 BUG_ON(rq != this_rq());
2451 BUG_ON(p == current);
2452 lockdep_assert_held(&rq->lock);
2453
2454 if (!(p->state & TASK_NORMAL))
2455 return;
2456
2457 if (!p->se.on_rq) {
2458 if (likely(!task_running(rq, p))) {
2459 schedstat_inc(rq, ttwu_count);
2460 schedstat_inc(rq, ttwu_local);
2461 }
2462 ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
2463 success = true;
2464 }
2465 ttwu_post_activation(p, rq, 0, success);
2466}
2467
50fa610a
DH
2468/**
2469 * wake_up_process - Wake up a specific process
2470 * @p: The process to be woken up.
2471 *
2472 * Attempt to wake up the nominated process and move it to the set of runnable
2473 * processes. Returns 1 if the process was woken up, 0 if it was already
2474 * running.
2475 *
2476 * It may be assumed that this function implies a write memory barrier before
2477 * changing the task state if and only if any tasks are woken up.
2478 */
7ad5b3a5 2479int wake_up_process(struct task_struct *p)
1da177e4 2480{
d9514f6c 2481 return try_to_wake_up(p, TASK_ALL, 0);
1da177e4 2482}
1da177e4
LT
2483EXPORT_SYMBOL(wake_up_process);
2484
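/*
 * Illustrative sketch (not part of this file): the classic pairing for
 * wake_up_process() is a sleeper that re-checks its condition between
 * setting its state and calling schedule().  'example_done' and both
 * functions are hypothetical.
 */
static int example_done;	/* hypothetical condition flag */

static void example_sleeper(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (!example_done) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

static void example_waker(struct task_struct *sleeper)
{
	example_done = 1;
	wake_up_process(sleeper);	/* implies the needed barrier when it wakes @sleeper */
}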
7ad5b3a5 2485int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
2486{
2487 return try_to_wake_up(p, state, 0);
2488}
2489
1da177e4
LT
2490/*
2491 * Perform scheduler related setup for a newly forked process p.
2492 * p is forked by current.
dd41f596
IM
2493 *
2494 * __sched_fork() is basic setup used by init_idle() too:
2495 */
2496static void __sched_fork(struct task_struct *p)
2497{
dd41f596
IM
2498 p->se.exec_start = 0;
2499 p->se.sum_exec_runtime = 0;
f6cf891c 2500 p->se.prev_sum_exec_runtime = 0;
6c594c21 2501 p->se.nr_migrations = 0;
6cfb0d5d
IM
2502
2503#ifdef CONFIG_SCHEDSTATS
41acab88 2504 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 2505#endif
476d139c 2506
fa717060 2507 INIT_LIST_HEAD(&p->rt.run_list);
dd41f596 2508 p->se.on_rq = 0;
4a55bd5e 2509 INIT_LIST_HEAD(&p->se.group_node);
476d139c 2510
e107be36
AK
2511#ifdef CONFIG_PREEMPT_NOTIFIERS
2512 INIT_HLIST_HEAD(&p->preempt_notifiers);
2513#endif
dd41f596
IM
2514}
2515
2516/*
2517 * fork()/clone()-time setup:
2518 */
2519void sched_fork(struct task_struct *p, int clone_flags)
2520{
2521 int cpu = get_cpu();
2522
2523 __sched_fork(p);
06b83b5f 2524 /*
0017d735 2525 * We mark the process as running here. This guarantees that
06b83b5f
PZ
2526 * nobody will actually run it, and a signal or other external
2527 * event cannot wake it up and insert it on the runqueue either.
2528 */
0017d735 2529 p->state = TASK_RUNNING;
dd41f596 2530
b9dc29e7
MG
2531 /*
2532 * Revert to default priority/policy on fork if requested.
2533 */
2534 if (unlikely(p->sched_reset_on_fork)) {
f83f9ac2 2535 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
b9dc29e7 2536 p->policy = SCHED_NORMAL;
f83f9ac2
PW
2537 p->normal_prio = p->static_prio;
2538 }
b9dc29e7 2539
6c697bdf
MG
2540 if (PRIO_TO_NICE(p->static_prio) < 0) {
2541 p->static_prio = NICE_TO_PRIO(0);
f83f9ac2 2542 p->normal_prio = p->static_prio;
6c697bdf
MG
2543 set_load_weight(p);
2544 }
2545
b9dc29e7
MG
2546 /*
2547 * We don't need the reset flag anymore after the fork. It has
2548 * fulfilled its duty:
2549 */
2550 p->sched_reset_on_fork = 0;
2551 }
ca94c442 2552
f83f9ac2
PW
2553 /*
2554 * Make sure we do not leak PI boosting priority to the child.
2555 */
2556 p->prio = current->normal_prio;
2557
2ddbf952
HS
2558 if (!rt_prio(p->prio))
2559 p->sched_class = &fair_sched_class;
b29739f9 2560
cd29fe6f
PZ
2561 if (p->sched_class->task_fork)
2562 p->sched_class->task_fork(p);
2563
86951599
PZ
2564 /*
2565 * The child is not yet in the pid-hash so no cgroup attach races,
2566 * and the cgroup is pinned to this child due to cgroup_fork()
2567 * is run before sched_fork().
2568 *
2569 * Silence PROVE_RCU.
2570 */
2571 rcu_read_lock();
5f3edc1b 2572 set_task_cpu(p, cpu);
86951599 2573 rcu_read_unlock();
5f3edc1b 2574
52f17b6c 2575#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
dd41f596 2576 if (likely(sched_info_on()))
52f17b6c 2577 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 2578#endif
d6077cb8 2579#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
4866cde0
NP
2580 p->oncpu = 0;
2581#endif
1da177e4 2582#ifdef CONFIG_PREEMPT
4866cde0 2583 /* Want to start with kernel preemption disabled. */
a1261f54 2584 task_thread_info(p)->preempt_count = 1;
1da177e4 2585#endif
917b627d
GH
2586 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2587
476d139c 2588 put_cpu();
1da177e4
LT
2589}
2590
2591/*
2592 * wake_up_new_task - wake up a newly created task for the first time.
2593 *
2594 * This function will do some initial scheduler statistics housekeeping
2595 * that must be done for every newly created context, then puts the task
2596 * on the runqueue and wakes it.
2597 */
7ad5b3a5 2598void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1da177e4
LT
2599{
2600 unsigned long flags;
dd41f596 2601 struct rq *rq;
c890692b 2602 int cpu __maybe_unused = get_cpu();
fabf318e
PZ
2603
2604#ifdef CONFIG_SMP
0017d735
PZ
2605 rq = task_rq_lock(p, &flags);
2606 p->state = TASK_WAKING;
2607
fabf318e
PZ
2608 /*
2609 * Fork balancing, do it here and not earlier because:
2610 * - cpus_allowed can change in the fork path
2611 * - any previously selected cpu might disappear through hotplug
2612 *
0017d735
PZ
2613 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2614 * without people poking at ->cpus_allowed.
fabf318e 2615 */
0017d735 2616 cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
fabf318e 2617 set_task_cpu(p, cpu);
1da177e4 2618
06b83b5f 2619 p->state = TASK_RUNNING;
0017d735
PZ
2620 task_rq_unlock(rq, &flags);
2621#endif
2622
2623 rq = task_rq_lock(p, &flags);
cd29fe6f 2624 activate_task(rq, p, 0);
27a9da65 2625 trace_sched_wakeup_new(p, 1);
a7558e01 2626 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 2627#ifdef CONFIG_SMP
efbbd05a
PZ
2628 if (p->sched_class->task_woken)
2629 p->sched_class->task_woken(rq, p);
9a897c5a 2630#endif
dd41f596 2631 task_rq_unlock(rq, &flags);
fabf318e 2632 put_cpu();
1da177e4
LT
2633}
2634
e107be36
AK
2635#ifdef CONFIG_PREEMPT_NOTIFIERS
2636
2637/**
80dd99b3 2638 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 2639 * @notifier: notifier struct to register
e107be36
AK
2640 */
2641void preempt_notifier_register(struct preempt_notifier *notifier)
2642{
2643 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2644}
2645EXPORT_SYMBOL_GPL(preempt_notifier_register);
2646
2647/**
2648 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 2649 * @notifier: notifier struct to unregister
e107be36
AK
2650 *
2651 * This is safe to call from within a preemption notifier.
2652 */
2653void preempt_notifier_unregister(struct preempt_notifier *notifier)
2654{
2655 hlist_del(&notifier->link);
2656}
2657EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2658
2659static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2660{
2661 struct preempt_notifier *notifier;
2662 struct hlist_node *node;
2663
2664 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2665 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2666}
2667
2668static void
2669fire_sched_out_preempt_notifiers(struct task_struct *curr,
2670 struct task_struct *next)
2671{
2672 struct preempt_notifier *notifier;
2673 struct hlist_node *node;
2674
2675 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2676 notifier->ops->sched_out(notifier, next);
2677}
2678
6d6bc0ad 2679#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36
AK
2680
2681static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2682{
2683}
2684
2685static void
2686fire_sched_out_preempt_notifiers(struct task_struct *curr,
2687 struct task_struct *next)
2688{
2689}
2690
6d6bc0ad 2691#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 2692
4866cde0
NP
2693/**
2694 * prepare_task_switch - prepare to switch tasks
2695 * @rq: the runqueue preparing to switch
421cee29 2696 * @prev: the current task that is being switched out
4866cde0
NP
2697 * @next: the task we are going to switch to.
2698 *
2699 * This is called with the rq lock held and interrupts off. It must
2700 * be paired with a subsequent finish_task_switch after the context
2701 * switch.
2702 *
2703 * prepare_task_switch sets up locking and calls architecture specific
2704 * hooks.
2705 */
e107be36
AK
2706static inline void
2707prepare_task_switch(struct rq *rq, struct task_struct *prev,
2708 struct task_struct *next)
4866cde0 2709{
e107be36 2710 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
2711 prepare_lock_switch(rq, next);
2712 prepare_arch_switch(next);
2713}
2714
1da177e4
LT
2715/**
2716 * finish_task_switch - clean up after a task-switch
344babaa 2717 * @rq: runqueue associated with task-switch
1da177e4
LT
2718 * @prev: the thread we just switched away from.
2719 *
4866cde0
NP
2720 * finish_task_switch must be called after the context switch, paired
2721 * with a prepare_task_switch call before the context switch.
2722 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2723 * and do any other architecture-specific cleanup actions.
1da177e4
LT
2724 *
2725 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 2726 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
2727 * with the lock held can cause deadlocks; see schedule() for
2728 * details.)
2729 */
a9957449 2730static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1da177e4
LT
2731 __releases(rq->lock)
2732{
1da177e4 2733 struct mm_struct *mm = rq->prev_mm;
55a101f8 2734 long prev_state;
1da177e4
LT
2735
2736 rq->prev_mm = NULL;
2737
2738 /*
2739 * A task struct has one reference for the use as "current".
c394cc9f 2740 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
2741 * schedule one last time. The schedule call will never return, and
2742 * the scheduled task must drop that reference.
c394cc9f 2743 * The test for TASK_DEAD must occur while the runqueue locks are
1da177e4
LT
2744 * still held, otherwise prev could be scheduled on another cpu, die
2745 * there before we look at prev->state, and then the reference would
2746 * be dropped twice.
2747 * Manfred Spraul <manfred@colorfullife.com>
2748 */
55a101f8 2749 prev_state = prev->state;
4866cde0 2750 finish_arch_switch(prev);
8381f65d
JI
2751#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2752 local_irq_disable();
2753#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
49f47433 2754 perf_event_task_sched_in(current);
8381f65d
JI
2755#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2756 local_irq_enable();
2757#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
4866cde0 2758 finish_lock_switch(rq, prev);
e8fa1362 2759
e107be36 2760 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
2761 if (mm)
2762 mmdrop(mm);
c394cc9f 2763 if (unlikely(prev_state == TASK_DEAD)) {
c6fd91f0 2764 /*
2765 * Remove function-return probe instances associated with this
2766 * task and put them back on the free list.
9761eea8 2767 */
c6fd91f0 2768 kprobe_flush_task(prev);
1da177e4 2769 put_task_struct(prev);
c6fd91f0 2770 }
1da177e4
LT
2771}
2772
3f029d3c
GH
2773#ifdef CONFIG_SMP
2774
2775/* assumes rq->lock is held */
2776static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2777{
2778 if (prev->sched_class->pre_schedule)
2779 prev->sched_class->pre_schedule(rq, prev);
2780}
2781
2782/* rq->lock is NOT held, but preemption is disabled */
2783static inline void post_schedule(struct rq *rq)
2784{
2785 if (rq->post_schedule) {
2786 unsigned long flags;
2787
05fa785c 2788 raw_spin_lock_irqsave(&rq->lock, flags);
3f029d3c
GH
2789 if (rq->curr->sched_class->post_schedule)
2790 rq->curr->sched_class->post_schedule(rq);
05fa785c 2791 raw_spin_unlock_irqrestore(&rq->lock, flags);
3f029d3c
GH
2792
2793 rq->post_schedule = 0;
2794 }
2795}
2796
2797#else
da19ab51 2798
3f029d3c
GH
2799static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2800{
2801}
2802
2803static inline void post_schedule(struct rq *rq)
2804{
1da177e4
LT
2805}
2806
3f029d3c
GH
2807#endif
2808
1da177e4
LT
2809/**
2810 * schedule_tail - first thing a freshly forked thread must call.
2811 * @prev: the thread we just switched away from.
2812 */
36c8b586 2813asmlinkage void schedule_tail(struct task_struct *prev)
1da177e4
LT
2814 __releases(rq->lock)
2815{
70b97a7f
IM
2816 struct rq *rq = this_rq();
2817
4866cde0 2818 finish_task_switch(rq, prev);
da19ab51 2819
3f029d3c
GH
2820 /*
2821 * FIXME: do we need to worry about rq being invalidated by the
2822 * task_switch?
2823 */
2824 post_schedule(rq);
70b97a7f 2825
4866cde0
NP
2826#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2827 /* In this case, finish_task_switch does not reenable preemption */
2828 preempt_enable();
2829#endif
1da177e4 2830 if (current->set_child_tid)
b488893a 2831 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
2832}
2833
2834/*
2835 * context_switch - switch to the new MM and the new
2836 * thread's register state.
2837 */
dd41f596 2838static inline void
70b97a7f 2839context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 2840 struct task_struct *next)
1da177e4 2841{
dd41f596 2842 struct mm_struct *mm, *oldmm;
1da177e4 2843
e107be36 2844 prepare_task_switch(rq, prev, next);
27a9da65 2845 trace_sched_switch(prev, next);
dd41f596
IM
2846 mm = next->mm;
2847 oldmm = prev->active_mm;
9226d125
ZA
2848 /*
2849 * For paravirt, this is coupled with an exit in switch_to to
2850 * combine the page table reload and the switch backend into
2851 * one hypercall.
2852 */
224101ed 2853 arch_start_context_switch(prev);
9226d125 2854
710390d9 2855 if (likely(!mm)) {
1da177e4
LT
2856 next->active_mm = oldmm;
2857 atomic_inc(&oldmm->mm_count);
2858 enter_lazy_tlb(oldmm, next);
2859 } else
2860 switch_mm(oldmm, mm, next);
2861
710390d9 2862 if (likely(!prev->mm)) {
1da177e4 2863 prev->active_mm = NULL;
1da177e4
LT
2864 rq->prev_mm = oldmm;
2865 }
3a5f5e48
IM
2866 /*
2867 * The runqueue lock will be released by the next
2868 * task (which is an invalid locking op but in the case
2869 * of the scheduler it's an obvious special case), so we
2870 * do an early lockdep release here:
2871 */
2872#ifndef __ARCH_WANT_UNLOCKED_CTXSW
8a25d5de 2873 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3a5f5e48 2874#endif
1da177e4
LT
2875
2876 /* Here we just switch the register state and the stack. */
2877 switch_to(prev, next, prev);
2878
dd41f596
IM
2879 barrier();
2880 /*
2881 * this_rq must be evaluated again because prev may have moved
2882 * CPUs since it called schedule(), thus the 'rq' on its stack
2883 * frame will be invalid.
2884 */
2885 finish_task_switch(this_rq(), prev);
1da177e4
LT
2886}
2887
2888/*
2889 * nr_running, nr_uninterruptible and nr_context_switches:
2890 *
2891 * externally visible scheduler statistics: current number of runnable
2892 * threads, current number of uninterruptible-sleeping threads, total
2893 * number of context switches performed since bootup.
2894 */
2895unsigned long nr_running(void)
2896{
2897 unsigned long i, sum = 0;
2898
2899 for_each_online_cpu(i)
2900 sum += cpu_rq(i)->nr_running;
2901
2902 return sum;
f711f609 2903}
1da177e4
LT
2904
2905unsigned long nr_uninterruptible(void)
f711f609 2906{
1da177e4 2907 unsigned long i, sum = 0;
f711f609 2908
0a945022 2909 for_each_possible_cpu(i)
1da177e4 2910 sum += cpu_rq(i)->nr_uninterruptible;
f711f609
GS
2911
2912 /*
1da177e4
LT
2913 * Since we read the counters lockless, it might be slightly
2914 * inaccurate. Do not allow it to go below zero though:
f711f609 2915 */
1da177e4
LT
2916 if (unlikely((long)sum < 0))
2917 sum = 0;
f711f609 2918
1da177e4 2919 return sum;
f711f609 2920}
f711f609 2921
1da177e4 2922unsigned long long nr_context_switches(void)
46cb4b7c 2923{
cc94abfc
SR
2924 int i;
2925 unsigned long long sum = 0;
46cb4b7c 2926
0a945022 2927 for_each_possible_cpu(i)
1da177e4 2928 sum += cpu_rq(i)->nr_switches;
46cb4b7c 2929
1da177e4
LT
2930 return sum;
2931}
483b4ee6 2932
1da177e4
LT
2933unsigned long nr_iowait(void)
2934{
2935 unsigned long i, sum = 0;
483b4ee6 2936
0a945022 2937 for_each_possible_cpu(i)
1da177e4 2938 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 2939
1da177e4
LT
2940 return sum;
2941}
483b4ee6 2942
8c215bd3 2943unsigned long nr_iowait_cpu(int cpu)
69d25870 2944{
8c215bd3 2945 struct rq *this = cpu_rq(cpu);
69d25870
AV
2946 return atomic_read(&this->nr_iowait);
2947}
46cb4b7c 2948
69d25870
AV
2949unsigned long this_cpu_load(void)
2950{
2951 struct rq *this = this_rq();
2952 return this->cpu_load[0];
2953}
e790fb0b 2954
46cb4b7c 2955
dce48a84
TG
2956/* Variables and functions for calc_load */
2957static atomic_long_t calc_load_tasks;
2958static unsigned long calc_load_update;
2959unsigned long avenrun[3];
2960EXPORT_SYMBOL(avenrun);
46cb4b7c 2961
74f5187a
PZ
2962static long calc_load_fold_active(struct rq *this_rq)
2963{
2964 long nr_active, delta = 0;
2965
2966 nr_active = this_rq->nr_running;
2967 nr_active += (long) this_rq->nr_uninterruptible;
2968
2969 if (nr_active != this_rq->calc_load_active) {
2970 delta = nr_active - this_rq->calc_load_active;
2971 this_rq->calc_load_active = nr_active;
2972 }
2973
2974 return delta;
2975}
2976
2977#ifdef CONFIG_NO_HZ
2978/*
2979 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
2980 *
2981 * When making the ILB scale, we should try to pull this in as well.
2982 */
2983static atomic_long_t calc_load_tasks_idle;
2984
2985static void calc_load_account_idle(struct rq *this_rq)
2986{
2987 long delta;
2988
2989 delta = calc_load_fold_active(this_rq);
2990 if (delta)
2991 atomic_long_add(delta, &calc_load_tasks_idle);
2992}
2993
2994static long calc_load_fold_idle(void)
2995{
2996 long delta = 0;
2997
2998 /*
2999 * It's got a race, we don't care...
3000 */
3001 if (atomic_long_read(&calc_load_tasks_idle))
3002 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3003
3004 return delta;
3005}
3006#else
3007static void calc_load_account_idle(struct rq *this_rq)
3008{
3009}
3010
3011static inline long calc_load_fold_idle(void)
3012{
3013 return 0;
3014}
3015#endif
3016
2d02494f
TG
3017/**
3018 * get_avenrun - get the load average array
3019 * @loads: pointer to dest load array
3020 * @offset: offset to add
3021 * @shift: shift count to shift the result left
3022 *
3023 * These values are estimates at best, so no need for locking.
3024 */
3025void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3026{
3027 loads[0] = (avenrun[0] + offset) << shift;
3028 loads[1] = (avenrun[1] + offset) << shift;
3029 loads[2] = (avenrun[2] + offset) << shift;
46cb4b7c 3030}
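/*
 * Illustrative sketch (not part of this file): avenrun[] is FSHIFT-bit
 * fixed point, so a reader such as /proc/loadavg typically asks for a
 * rounding offset of FIXED_1/200 and splits each value into an integer
 * part and a two-digit fractional part.
 */
static void example_print_loadavg(void)
{
	unsigned long loads[3];

	get_avenrun(loads, FIXED_1/200, 0);
	printk(KERN_INFO "load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
	       loads[0] >> FSHIFT, ((loads[0] & (FIXED_1 - 1)) * 100) >> FSHIFT,
	       loads[1] >> FSHIFT, ((loads[1] & (FIXED_1 - 1)) * 100) >> FSHIFT,
	       loads[2] >> FSHIFT, ((loads[2] & (FIXED_1 - 1)) * 100) >> FSHIFT);
}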
46cb4b7c 3031
dce48a84
TG
3032static unsigned long
3033calc_load(unsigned long load, unsigned long exp, unsigned long active)
db1b1fef 3034{
dce48a84
TG
3035 load *= exp;
3036 load += active * (FIXED_1 - exp);
3037 return load >> FSHIFT;
3038}
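/*
 * Illustrative note (not part of this file): with FSHIFT == 11 the fixed
 * point unit FIXED_1 is 2048 and EXP_1 is 1884.  One update step starting
 * from a 1-minute load of 2048 (i.e. 1.00) with active == 2*2048 gives
 * (2048*1884 + 4096*(2048-1884)) >> 11 == 2212, i.e. roughly 1.08.
 */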
46cb4b7c
SS
3039
3040/*
dce48a84
TG
3041 * calc_load - update the avenrun load estimates 10 ticks after the
3042 * CPUs have updated calc_load_tasks.
7835b98b 3043 */
dce48a84 3044void calc_global_load(void)
7835b98b 3045{
dce48a84
TG
3046 unsigned long upd = calc_load_update + 10;
3047 long active;
1da177e4 3048
dce48a84
TG
3049 if (time_before(jiffies, upd))
3050 return;
1da177e4 3051
dce48a84
TG
3052 active = atomic_long_read(&calc_load_tasks);
3053 active = active > 0 ? active * FIXED_1 : 0;
1da177e4 3054
dce48a84
TG
3055 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3056 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3057 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
dd41f596 3058
dce48a84
TG
3059 calc_load_update += LOAD_FREQ;
3060}
1da177e4 3061
dce48a84 3062/*
74f5187a
PZ
3063 * Called from update_cpu_load() to periodically update this CPU's
3064 * active count.
dce48a84
TG
3065 */
3066static void calc_load_account_active(struct rq *this_rq)
3067{
74f5187a 3068 long delta;
08c183f3 3069
74f5187a
PZ
3070 if (time_before(jiffies, this_rq->calc_load_update))
3071 return;
783609c6 3072
74f5187a
PZ
3073 delta = calc_load_fold_active(this_rq);
3074 delta += calc_load_fold_idle();
3075 if (delta)
dce48a84 3076 atomic_long_add(delta, &calc_load_tasks);
74f5187a
PZ
3077
3078 this_rq->calc_load_update += LOAD_FREQ;
46cb4b7c
SS
3079}
3080
fdf3e95d
VP
3081/*
3082 * The exact cpuload at various idx values, calculated at every tick would be
3083 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3084 *
3085 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3086 * on nth tick when cpu may be busy, then we have:
3087 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3088 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3089 *
3090 * decay_load_missed() below does efficient calculation of
3091 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3092 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3093 *
3094 * The calculation is approximated on a 128 point scale.
3095 * degrade_zero_ticks is the number of ticks after which load at any
3096 * particular idx is approximated to be zero.
3097 * degrade_factor is a precomputed table, a row for each load idx.
3098 * Each column corresponds to degradation factor for a power of two ticks,
3099 * based on 128 point scale.
3100 * Example:
3101 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3102 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3103 *
3104 * With this power of 2 load factors, we can degrade the load n times
3105 * by looking at 1 bits in n and doing as many mult/shift instead of
3106 * n mult/shifts needed by the exact degradation.
3107 */
3108#define DEGRADE_SHIFT 7
3109static const unsigned char
3110 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3111static const unsigned char
3112 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3113 {0, 0, 0, 0, 0, 0, 0, 0},
3114 {64, 32, 8, 0, 0, 0, 0, 0},
3115 {96, 72, 40, 12, 1, 0, 0},
3116 {112, 98, 75, 43, 15, 1, 0},
3117 {120, 112, 98, 76, 45, 16, 2} };
3118
3119/*
3120 * Update cpu_load for any missed ticks due to tickless idle. The backlog
3121 * builds up while the CPU is idle, so we just decay the old load without
3122 * adding any new load.
3123 */
3124static unsigned long
3125decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3126{
3127 int j = 0;
3128
3129 if (!missed_updates)
3130 return load;
3131
3132 if (missed_updates >= degrade_zero_ticks[idx])
3133 return 0;
3134
3135 if (idx == 1)
3136 return load >> missed_updates;
3137
3138 while (missed_updates) {
3139 if (missed_updates % 2)
3140 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3141
3142 missed_updates >>= 1;
3143 j++;
3144 }
3145 return load;
3146}
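/*
 * Illustrative note (not part of this file): for idx == 2 and eight missed
 * ticks (binary 1000) the loop above applies only degrade_factor[2][3],
 * so the decayed value is load * 12 >> DEGRADE_SHIFT, i.e. 12/128 of the
 * old load -- the same figure quoted in the table comment above.
 */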
3147
46cb4b7c 3148/*
dd41f596 3149 * Update rq->cpu_load[] statistics. This function is usually called every
fdf3e95d
VP
3150 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3151 * every tick. We fix it up based on jiffies.
46cb4b7c 3152 */
dd41f596 3153static void update_cpu_load(struct rq *this_rq)
46cb4b7c 3154{
495eca49 3155 unsigned long this_load = this_rq->load.weight;
fdf3e95d
VP
3156 unsigned long curr_jiffies = jiffies;
3157 unsigned long pending_updates;
dd41f596 3158 int i, scale;
46cb4b7c 3159
dd41f596 3160 this_rq->nr_load_updates++;
46cb4b7c 3161
fdf3e95d
VP
3162 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3163 if (curr_jiffies == this_rq->last_load_update_tick)
3164 return;
3165
3166 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3167 this_rq->last_load_update_tick = curr_jiffies;
3168
dd41f596 3169 /* Update our load: */
fdf3e95d
VP
3170 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3171 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
dd41f596 3172 unsigned long old_load, new_load;
7d1e6a9b 3173
dd41f596 3174 /* scale is effectively 1 << i now, and >> i divides by scale */
46cb4b7c 3175
dd41f596 3176 old_load = this_rq->cpu_load[i];
fdf3e95d 3177 old_load = decay_load_missed(old_load, pending_updates - 1, i);
dd41f596 3178 new_load = this_load;
a25707f3
IM
3179 /*
3180 * Round up the averaging division if load is increasing. This
3181 * prevents us from getting stuck on 9 if the load is 10, for
3182 * example.
3183 */
3184 if (new_load > old_load)
fdf3e95d
VP
3185 new_load += scale - 1;
3186
3187 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
dd41f596 3188 }
da2b71ed
SS
3189
3190 sched_avg_update(this_rq);
fdf3e95d
VP
3191}
3192
3193static void update_cpu_load_active(struct rq *this_rq)
3194{
3195 update_cpu_load(this_rq);
46cb4b7c 3196
74f5187a 3197 calc_load_account_active(this_rq);
46cb4b7c
SS
3198}
3199
dd41f596 3200#ifdef CONFIG_SMP
8a0be9ef 3201
46cb4b7c 3202/*
38022906
PZ
3203 * sched_exec - execve() is a valuable balancing opportunity, because at
3204 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 3205 */
38022906 3206void sched_exec(void)
46cb4b7c 3207{
38022906 3208 struct task_struct *p = current;
1da177e4 3209 unsigned long flags;
70b97a7f 3210 struct rq *rq;
0017d735 3211 int dest_cpu;
46cb4b7c 3212
1da177e4 3213 rq = task_rq_lock(p, &flags);
0017d735
PZ
3214 dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3215 if (dest_cpu == smp_processor_id())
3216 goto unlock;
38022906 3217
46cb4b7c 3218 /*
38022906 3219 * select_task_rq() can race against ->cpus_allowed
46cb4b7c 3220 */
30da688e 3221 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
969c7921
TH
3222 likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
3223 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 3224
1da177e4 3225 task_rq_unlock(rq, &flags);
969c7921 3226 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1da177e4
LT
3227 return;
3228 }
0017d735 3229unlock:
1da177e4 3230 task_rq_unlock(rq, &flags);
1da177e4 3231}
dd41f596 3232
1da177e4
LT
3233#endif
3234
1da177e4
LT
3235DEFINE_PER_CPU(struct kernel_stat, kstat);
3236
3237EXPORT_PER_CPU_SYMBOL(kstat);
3238
3239/*
c5f8d995 3240 * Return any ns on the sched_clock that have not yet been accounted in
f06febc9 3241 * @p in case that task is currently running.
c5f8d995
HS
3242 *
3243 * Called with task_rq_lock() held on @rq.
1da177e4 3244 */
c5f8d995
HS
3245static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3246{
3247 u64 ns = 0;
3248
3249 if (task_current(rq, p)) {
3250 update_rq_clock(rq);
3251 ns = rq->clock - p->se.exec_start;
3252 if ((s64)ns < 0)
3253 ns = 0;
3254 }
3255
3256 return ns;
3257}
3258
bb34d92f 3259unsigned long long task_delta_exec(struct task_struct *p)
1da177e4 3260{
1da177e4 3261 unsigned long flags;
41b86e9c 3262 struct rq *rq;
bb34d92f 3263 u64 ns = 0;
48f24c4d 3264
41b86e9c 3265 rq = task_rq_lock(p, &flags);
c5f8d995
HS
3266 ns = do_task_delta_exec(p, rq);
3267 task_rq_unlock(rq, &flags);
1508487e 3268
c5f8d995
HS
3269 return ns;
3270}
f06febc9 3271
c5f8d995
HS
3272/*
3273 * Return accounted runtime for the task.
3274 * In case the task is currently running, return the runtime plus current's
3275 * pending runtime that has not been accounted yet.
3276 */
3277unsigned long long task_sched_runtime(struct task_struct *p)
3278{
3279 unsigned long flags;
3280 struct rq *rq;
3281 u64 ns = 0;
3282
3283 rq = task_rq_lock(p, &flags);
3284 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3285 task_rq_unlock(rq, &flags);
3286
3287 return ns;
3288}
48f24c4d 3289
c5f8d995
HS
3290/*
3291 * Return sum_exec_runtime for the thread group.
3292 * In case the task is currently running, return the sum plus current's
3293 * pending runtime that has not been accounted yet.
3294 *
3295 * Note that the thread group might have other running tasks as well,
3296 * so the return value does not include pending runtime that other
3297 * running tasks might have.
3298 */
3299unsigned long long thread_group_sched_runtime(struct task_struct *p)
3300{
3301 struct task_cputime totals;
3302 unsigned long flags;
3303 struct rq *rq;
3304 u64 ns;
3305
3306 rq = task_rq_lock(p, &flags);
3307 thread_group_cputime(p, &totals);
3308 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
41b86e9c 3309 task_rq_unlock(rq, &flags);
48f24c4d 3310
1da177e4
LT
3311 return ns;
3312}
3313
1da177e4
LT
3314/*
3315 * Account user cpu time to a process.
3316 * @p: the process that the cpu time gets accounted to
1da177e4 3317 * @cputime: the cpu time spent in user space since the last update
457533a7 3318 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4 3319 */
457533a7
MS
3320void account_user_time(struct task_struct *p, cputime_t cputime,
3321 cputime_t cputime_scaled)
1da177e4
LT
3322{
3323 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3324 cputime64_t tmp;
3325
457533a7 3326 /* Add user time to process. */
1da177e4 3327 p->utime = cputime_add(p->utime, cputime);
457533a7 3328 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 3329 account_group_user_time(p, cputime);
1da177e4
LT
3330
3331 /* Add user time to cpustat. */
3332 tmp = cputime_to_cputime64(cputime);
3333 if (TASK_NICE(p) > 0)
3334 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3335 else
3336 cpustat->user = cputime64_add(cpustat->user, tmp);
ef12fefa
BR
3337
3338 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
49b5cf34
JL
3339 /* Account for user time used */
3340 acct_update_integrals(p);
1da177e4
LT
3341}
3342
94886b84
LV
3343/*
3344 * Account guest cpu time to a process.
3345 * @p: the process that the cpu time gets accounted to
3346 * @cputime: the cpu time spent in virtual machine since the last update
457533a7 3347 * @cputime_scaled: cputime scaled by cpu frequency
94886b84 3348 */
457533a7
MS
3349static void account_guest_time(struct task_struct *p, cputime_t cputime,
3350 cputime_t cputime_scaled)
94886b84
LV
3351{
3352 cputime64_t tmp;
3353 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3354
3355 tmp = cputime_to_cputime64(cputime);
3356
457533a7 3357 /* Add guest time to process. */
94886b84 3358 p->utime = cputime_add(p->utime, cputime);
457533a7 3359 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 3360 account_group_user_time(p, cputime);
94886b84
LV
3361 p->gtime = cputime_add(p->gtime, cputime);
3362
457533a7 3363 /* Add guest time to cpustat. */
ce0e7b28
RO
3364 if (TASK_NICE(p) > 0) {
3365 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3366 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3367 } else {
3368 cpustat->user = cputime64_add(cpustat->user, tmp);
3369 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3370 }
94886b84
LV
3371}
3372
1da177e4
LT
3373/*
3374 * Account system cpu time to a process.
3375 * @p: the process that the cpu time gets accounted to
3376 * @hardirq_offset: the offset to subtract from hardirq_count()
3377 * @cputime: the cpu time spent in kernel space since the last update
457533a7 3378 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4
LT
3379 */
3380void account_system_time(struct task_struct *p, int hardirq_offset,
457533a7 3381 cputime_t cputime, cputime_t cputime_scaled)
1da177e4
LT
3382{
3383 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1da177e4
LT
3384 cputime64_t tmp;
3385
983ed7a6 3386 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
457533a7 3387 account_guest_time(p, cputime, cputime_scaled);
983ed7a6
HH
3388 return;
3389 }
94886b84 3390
457533a7 3391 /* Add system time to process. */
1da177e4 3392 p->stime = cputime_add(p->stime, cputime);
457533a7 3393 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
f06febc9 3394 account_group_system_time(p, cputime);
1da177e4
LT
3395
3396 /* Add system time to cpustat. */
3397 tmp = cputime_to_cputime64(cputime);
3398 if (hardirq_count() - hardirq_offset)
3399 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3400 else if (softirq_count())
3401 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
1da177e4 3402 else
79741dd3
MS
3403 cpustat->system = cputime64_add(cpustat->system, tmp);
3404
ef12fefa
BR
3405 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3406
1da177e4
LT
3407 /* Account for system time used */
3408 acct_update_integrals(p);
1da177e4
LT
3409}
3410
c66f08be 3411/*
1da177e4 3412 * Account for involuntary wait time.
1da177e4 3413 * @cputime: the cpu time spent in involuntary wait
c66f08be 3414 */
79741dd3 3415void account_steal_time(cputime_t cputime)
c66f08be 3416{
79741dd3
MS
3417 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3418 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3419
3420 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
c66f08be
MN
3421}
3422
1da177e4 3423/*
79741dd3
MS
3424 * Account for idle time.
3425 * @cputime: the cpu time spent in idle wait
1da177e4 3426 */
79741dd3 3427void account_idle_time(cputime_t cputime)
1da177e4
LT
3428{
3429 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
79741dd3 3430 cputime64_t cputime64 = cputime_to_cputime64(cputime);
70b97a7f 3431 struct rq *rq = this_rq();
1da177e4 3432
79741dd3
MS
3433 if (atomic_read(&rq->nr_iowait) > 0)
3434 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3435 else
3436 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
1da177e4
LT
3437}
3438
79741dd3
MS
3439#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3440
3441/*
3442 * Account a single tick of cpu time.
3443 * @p: the process that the cpu time gets accounted to
3444 * @user_tick: indicates if the tick is a user or a system tick
3445 */
3446void account_process_tick(struct task_struct *p, int user_tick)
3447{
a42548a1 3448 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
79741dd3
MS
3449 struct rq *rq = this_rq();
3450
3451 if (user_tick)
a42548a1 3452 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
f5f293a4 3453 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
a42548a1 3454 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
79741dd3
MS
3455 one_jiffy_scaled);
3456 else
a42548a1 3457 account_idle_time(cputime_one_jiffy);
79741dd3
MS
3458}
3459
3460/*
3461 * Account multiple ticks of steal time.
3462 * @ticks: number of stolen ticks
3464 */
3465void account_steal_ticks(unsigned long ticks)
3466{
3467 account_steal_time(jiffies_to_cputime(ticks));
3468}
3469
3470/*
3471 * Account multiple ticks of idle time.
3472 * @ticks: number of idle ticks
3473 */
3474void account_idle_ticks(unsigned long ticks)
3475{
3476 account_idle_time(jiffies_to_cputime(ticks));
1da177e4
LT
3477}
3478
79741dd3
MS
3479#endif
3480
49048622
BS
3481/*
3482 * Use precise platform statistics if available:
3483 */
3484#ifdef CONFIG_VIRT_CPU_ACCOUNTING
d180c5bc 3485void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 3486{
d99ca3b9
HS
3487 *ut = p->utime;
3488 *st = p->stime;
49048622
BS
3489}
3490
0cf55e1e 3491void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 3492{
0cf55e1e
HS
3493 struct task_cputime cputime;
3494
3495 thread_group_cputime(p, &cputime);
3496
3497 *ut = cputime.utime;
3498 *st = cputime.stime;
49048622
BS
3499}
3500#else
761b1d26
HS
3501
3502#ifndef nsecs_to_cputime
b7b20df9 3503# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
761b1d26
HS
3504#endif
3505
d180c5bc 3506void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 3507{
d99ca3b9 3508 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
49048622
BS
3509
3510 /*
3511 * Use CFS's precise accounting:
3512 */
d180c5bc 3513 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
49048622
BS
3514
3515 if (total) {
d180c5bc
HS
3516 u64 temp;
3517
3518 temp = (u64)(rtime * utime);
49048622 3519 do_div(temp, total);
d180c5bc
HS
3520 utime = (cputime_t)temp;
3521 } else
3522 utime = rtime;
49048622 3523
d180c5bc
HS
3524 /*
3525 * Compare with previous values, to keep monotonicity:
3526 */
761b1d26 3527 p->prev_utime = max(p->prev_utime, utime);
d99ca3b9 3528 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
49048622 3529
d99ca3b9
HS
3530 *ut = p->prev_utime;
3531 *st = p->prev_stime;
49048622
BS
3532}
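/*
 * Illustrative note (not part of this file): suppose the tick-sampled
 * split is utime == stime == 2 ticks but sum_exec_runtime converts to 6
 * ticks of rtime.  The scaling above reports ut = 6*2/4 = 3 and
 * st = rtime - ut = 3, preserving the sampled ratio while matching the
 * precise total (subject to the monotonicity clamp against prev_*).
 */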
3533
0cf55e1e
HS
3534/*
3535 * Must be called with siglock held.
3536 */
3537void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 3538{
0cf55e1e
HS
3539 struct signal_struct *sig = p->signal;
3540 struct task_cputime cputime;
3541 cputime_t rtime, utime, total;
49048622 3542
0cf55e1e 3543 thread_group_cputime(p, &cputime);
49048622 3544
0cf55e1e
HS
3545 total = cputime_add(cputime.utime, cputime.stime);
3546 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
49048622 3547
0cf55e1e
HS
3548 if (total) {
3549 u64 temp;
49048622 3550
0cf55e1e
HS
3551 temp = (u64)(rtime * cputime.utime);
3552 do_div(temp, total);
3553 utime = (cputime_t)temp;
3554 } else
3555 utime = rtime;
3556
3557 sig->prev_utime = max(sig->prev_utime, utime);
3558 sig->prev_stime = max(sig->prev_stime,
3559 cputime_sub(rtime, sig->prev_utime));
3560
3561 *ut = sig->prev_utime;
3562 *st = sig->prev_stime;
49048622 3563}
49048622 3564#endif
49048622 3565
7835b98b
CL
3566/*
3567 * This function gets called by the timer code, with HZ frequency.
3568 * We call it with interrupts disabled.
3569 *
3570 * It also gets called by the fork code, when changing the parent's
3571 * timeslices.
3572 */
3573void scheduler_tick(void)
3574{
7835b98b
CL
3575 int cpu = smp_processor_id();
3576 struct rq *rq = cpu_rq(cpu);
dd41f596 3577 struct task_struct *curr = rq->curr;
3e51f33f
PZ
3578
3579 sched_clock_tick();
dd41f596 3580
05fa785c 3581 raw_spin_lock(&rq->lock);
3e51f33f 3582 update_rq_clock(rq);
fdf3e95d 3583 update_cpu_load_active(rq);
fa85ae24 3584 curr->sched_class->task_tick(rq, curr, 0);
05fa785c 3585 raw_spin_unlock(&rq->lock);
7835b98b 3586
e418e1c2 3587#ifdef CONFIG_SMP
dd41f596
IM
3588 rq->idle_at_tick = idle_cpu(cpu);
3589 trigger_load_balance(rq, cpu);
e418e1c2 3590#endif
1da177e4
LT
3591}
3592
132380a0 3593notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
3594{
3595 if (in_lock_functions(addr)) {
3596 addr = CALLER_ADDR2;
3597 if (in_lock_functions(addr))
3598 addr = CALLER_ADDR3;
3599 }
3600 return addr;
3601}
1da177e4 3602
7e49fcce
SR
3603#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3604 defined(CONFIG_PREEMPT_TRACER))
3605
43627582 3606void __kprobes add_preempt_count(int val)
1da177e4 3607{
6cd8a4bb 3608#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3609 /*
3610 * Underflow?
3611 */
9a11b49a
IM
3612 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3613 return;
6cd8a4bb 3614#endif
1da177e4 3615 preempt_count() += val;
6cd8a4bb 3616#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3617 /*
3618 * Spinlock count overflowing soon?
3619 */
33859f7f
MOS
3620 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3621 PREEMPT_MASK - 10);
6cd8a4bb
SR
3622#endif
3623 if (preempt_count() == val)
3624 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
3625}
3626EXPORT_SYMBOL(add_preempt_count);
3627
43627582 3628void __kprobes sub_preempt_count(int val)
1da177e4 3629{
6cd8a4bb 3630#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
3631 /*
3632 * Underflow?
3633 */
01e3eb82 3634 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 3635 return;
1da177e4
LT
3636 /*
3637 * Is the spinlock portion underflowing?
3638 */
9a11b49a
IM
3639 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3640 !(preempt_count() & PREEMPT_MASK)))
3641 return;
6cd8a4bb 3642#endif
9a11b49a 3643
6cd8a4bb
SR
3644 if (preempt_count() == val)
3645 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
3646 preempt_count() -= val;
3647}
3648EXPORT_SYMBOL(sub_preempt_count);
3649
3650#endif
3651
3652/*
dd41f596 3653 * Print scheduling while atomic bug:
1da177e4 3654 */
dd41f596 3655static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 3656{
838225b4
SS
3657 struct pt_regs *regs = get_irq_regs();
3658
3df0fc5b
PZ
3659 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3660 prev->comm, prev->pid, preempt_count());
838225b4 3661
dd41f596 3662 debug_show_held_locks(prev);
e21f5b15 3663 print_modules();
dd41f596
IM
3664 if (irqs_disabled())
3665 print_irqtrace_events(prev);
838225b4
SS
3666
3667 if (regs)
3668 show_regs(regs);
3669 else
3670 dump_stack();
dd41f596 3671}
1da177e4 3672
dd41f596
IM
3673/*
3674 * Various schedule()-time debugging checks and statistics:
3675 */
3676static inline void schedule_debug(struct task_struct *prev)
3677{
1da177e4 3678 /*
41a2d6cf 3679 * Test if we are atomic. Since do_exit() needs to call into
1da177e4
LT
3680 * schedule() atomically, we ignore that path for now.
3681 * Otherwise, whine if we are scheduling when we should not be.
3682 */
3f33a7ce 3683 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
dd41f596
IM
3684 __schedule_bug(prev);
3685
1da177e4
LT
3686 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3687
2d72376b 3688 schedstat_inc(this_rq(), sched_count);
b8efb561
IM
3689#ifdef CONFIG_SCHEDSTATS
3690 if (unlikely(prev->lock_depth >= 0)) {
2d72376b
IM
3691 schedstat_inc(this_rq(), bkl_count);
3692 schedstat_inc(prev, sched_info.bkl_count);
b8efb561
IM
3693 }
3694#endif
dd41f596
IM
3695}
3696
6cecd084 3697static void put_prev_task(struct rq *rq, struct task_struct *prev)
df1c99d4 3698{
a64692a3
MG
3699 if (prev->se.on_rq)
3700 update_rq_clock(rq);
3701 rq->skip_clock_update = 0;
6cecd084 3702 prev->sched_class->put_prev_task(rq, prev);
df1c99d4
MG
3703}
3704
dd41f596
IM
3705/*
3706 * Pick up the highest-prio task:
3707 */
3708static inline struct task_struct *
b67802ea 3709pick_next_task(struct rq *rq)
dd41f596 3710{
5522d5d5 3711 const struct sched_class *class;
dd41f596 3712 struct task_struct *p;
1da177e4
LT
3713
3714 /*
dd41f596
IM
3715 * Optimization: we know that if all tasks are in
3716 * the fair class we can call that function directly:
1da177e4 3717 */
dd41f596 3718 if (likely(rq->nr_running == rq->cfs.nr_running)) {
fb8d4724 3719 p = fair_sched_class.pick_next_task(rq);
dd41f596
IM
3720 if (likely(p))
3721 return p;
1da177e4
LT
3722 }
3723
dd41f596
IM
3724 class = sched_class_highest;
3725 for ( ; ; ) {
fb8d4724 3726 p = class->pick_next_task(rq);
dd41f596
IM
3727 if (p)
3728 return p;
3729 /*
3730 * Will never be NULL as the idle class always
3731 * returns a non-NULL p:
3732 */
3733 class = class->next;
3734 }
3735}
1da177e4 3736
dd41f596
IM
3737/*
3738 * schedule() is the main scheduler function.
3739 */
ff743345 3740asmlinkage void __sched schedule(void)
dd41f596
IM
3741{
3742 struct task_struct *prev, *next;
67ca7bde 3743 unsigned long *switch_count;
dd41f596 3744 struct rq *rq;
31656519 3745 int cpu;
dd41f596 3746
ff743345
PZ
3747need_resched:
3748 preempt_disable();
dd41f596
IM
3749 cpu = smp_processor_id();
3750 rq = cpu_rq(cpu);
25502a6c 3751 rcu_note_context_switch(cpu);
dd41f596 3752 prev = rq->curr;
dd41f596
IM
3753
3754 release_kernel_lock(prev);
3755need_resched_nonpreemptible:
3756
3757 schedule_debug(prev);
1da177e4 3758
31656519 3759 if (sched_feat(HRTICK))
f333fdc9 3760 hrtick_clear(rq);
8f4d37ec 3761
05fa785c 3762 raw_spin_lock_irq(&rq->lock);
1e819950 3763 clear_tsk_need_resched(prev);
1da177e4 3764
246d86b5 3765 switch_count = &prev->nivcsw;
1da177e4 3766 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
21aa9af0 3767 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 3768 prev->state = TASK_RUNNING;
21aa9af0
TH
3769 } else {
3770 /*
3771 * If a worker is going to sleep, notify and
3772 * ask workqueue whether it wants to wake up a
3773 * task to maintain concurrency. If so, wake
3774 * up the task.
3775 */
3776 if (prev->flags & PF_WQ_WORKER) {
3777 struct task_struct *to_wakeup;
3778
3779 to_wakeup = wq_worker_sleeping(prev, cpu);
3780 if (to_wakeup)
3781 try_to_wake_up_local(to_wakeup);
3782 }
371fd7e7 3783 deactivate_task(rq, prev, DEQUEUE_SLEEP);
21aa9af0 3784 }
dd41f596 3785 switch_count = &prev->nvcsw;
1da177e4
LT
3786 }
3787
3f029d3c 3788 pre_schedule(rq, prev);
f65eda4f 3789
dd41f596 3790 if (unlikely(!rq->nr_running))
1da177e4 3791 idle_balance(cpu, rq);
1da177e4 3792
df1c99d4 3793 put_prev_task(rq, prev);
b67802ea 3794 next = pick_next_task(rq);
1da177e4 3795
1da177e4 3796 if (likely(prev != next)) {
673a90a1 3797 sched_info_switch(prev, next);
49f47433 3798 perf_event_task_sched_out(prev, next);
673a90a1 3799
1da177e4
LT
3800 rq->nr_switches++;
3801 rq->curr = next;
3802 ++*switch_count;
3803
dd41f596 3804 context_switch(rq, prev, next); /* unlocks the rq */
8f4d37ec 3805 /*
246d86b5
ON
3806 * The context switch has flipped the stack from under us
3807 * and restored the local variables which were saved when
3808 * this task called schedule() in the past. prev == current
3809 * is still correct, but it can be moved to another cpu/rq.
8f4d37ec
PZ
3810 */
3811 cpu = smp_processor_id();
3812 rq = cpu_rq(cpu);
1da177e4 3813 } else
05fa785c 3814 raw_spin_unlock_irq(&rq->lock);
1da177e4 3815
3f029d3c 3816 post_schedule(rq);
1da177e4 3817
246d86b5 3818 if (unlikely(reacquire_kernel_lock(prev)))
1da177e4 3819 goto need_resched_nonpreemptible;
8f4d37ec 3820
1da177e4 3821 preempt_enable_no_resched();
ff743345 3822 if (need_resched())
1da177e4
LT
3823 goto need_resched;
3824}
1da177e4
LT
3825EXPORT_SYMBOL(schedule);
3826
c08f7829 3827#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
0d66bf6d
PZ
3828/*
3829 * Look out! "owner" is an entirely speculative pointer
3830 * access and not reliable.
3831 */
3832int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
3833{
3834 unsigned int cpu;
3835 struct rq *rq;
3836
3837 if (!sched_feat(OWNER_SPIN))
3838 return 0;
3839
3840#ifdef CONFIG_DEBUG_PAGEALLOC
3841 /*
3842 * Need to access the cpu field knowing that
3843 * DEBUG_PAGEALLOC could have unmapped it if
3844 * the mutex owner just released it and exited.
3845 */
3846 if (probe_kernel_address(&owner->cpu, cpu))
4b402210 3847 return 0;
0d66bf6d
PZ
3848#else
3849 cpu = owner->cpu;
3850#endif
3851
3852 /*
3853 * Even if the access succeeded (likely case),
3854 * the cpu field may no longer be valid.
3855 */
3856 if (cpu >= nr_cpumask_bits)
4b402210 3857 return 0;
0d66bf6d
PZ
3858
3859 /*
3860 * We need to validate that we can do a
3861 * get_cpu() and that we have the percpu area.
3862 */
3863 if (!cpu_online(cpu))
4b402210 3864 return 0;
0d66bf6d
PZ
3865
3866 rq = cpu_rq(cpu);
3867
3868 for (;;) {
3869 /*
3870 * Owner changed, break to re-assess state.
3871 */
9d0f4dcc
TC
3872 if (lock->owner != owner) {
3873 /*
3874 * If the lock has switched to a different owner,
3875 * we likely have heavy contention. Return 0 to quit
3876 * optimistic spinning and not contend further:
3877 */
3878 if (lock->owner)
3879 return 0;
0d66bf6d 3880 break;
9d0f4dcc 3881 }
0d66bf6d
PZ
3882
3883 /*
3884 * Is that owner really running on that cpu?
3885 */
3886 if (task_thread_info(rq->curr) != owner || need_resched())
3887 return 0;
3888
3889 cpu_relax();
3890 }
4b402210 3891
0d66bf6d
PZ
3892 return 1;
3893}
3894#endif
3895
1da177e4
LT
3896#ifdef CONFIG_PREEMPT
3897/*
2ed6e34f 3898 * this is the entry point to schedule() from in-kernel preemption,
41a2d6cf 3899 * off of preempt_enable. Kernel preemptions on return from interrupt
1da177e4
LT
3900 * go through preempt_schedule_irq() below and call schedule() directly.
3901 */
d1f74e20 3902asmlinkage void __sched notrace preempt_schedule(void)
1da177e4
LT
3903{
3904 struct thread_info *ti = current_thread_info();
6478d880 3905
1da177e4
LT
3906 /*
3907 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 3908 * we do not want to preempt the current task. Just return.
1da177e4 3909 */
beed33a8 3910 if (likely(ti->preempt_count || irqs_disabled()))
1da177e4
LT
3911 return;
3912
3a5c359a 3913 do {
d1f74e20 3914 add_preempt_count_notrace(PREEMPT_ACTIVE);
3a5c359a 3915 schedule();
d1f74e20 3916 sub_preempt_count_notrace(PREEMPT_ACTIVE);
1da177e4 3917
3a5c359a
AK
3918 /*
3919 * Check again in case we missed a preemption opportunity
3920 * between schedule and now.
3921 */
3922 barrier();
5ed0cec0 3923 } while (need_resched());
1da177e4 3924}
1da177e4
LT
3925EXPORT_SYMBOL(preempt_schedule);
3926
3927/*
2ed6e34f 3928 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
3929 * off of irq context.
3930 * Note that this is called and returns with irqs disabled. This will
3931 * protect us against recursive calling from irq.
3932 */
3933asmlinkage void __sched preempt_schedule_irq(void)
3934{
3935 struct thread_info *ti = current_thread_info();
6478d880 3936
2ed6e34f 3937 /* Catch callers which need to be fixed */
1da177e4
LT
3938 BUG_ON(ti->preempt_count || !irqs_disabled());
3939
3a5c359a
AK
3940 do {
3941 add_preempt_count(PREEMPT_ACTIVE);
3a5c359a
AK
3942 local_irq_enable();
3943 schedule();
3944 local_irq_disable();
3a5c359a 3945 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4 3946
3a5c359a
AK
3947 /*
3948 * Check again in case we missed a preemption opportunity
3949 * between schedule and now.
3950 */
3951 barrier();
5ed0cec0 3952 } while (need_resched());
1da177e4
LT
3953}
3954
3955#endif /* CONFIG_PREEMPT */
3956
63859d4f 3957int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 3958 void *key)
1da177e4 3959{
63859d4f 3960 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 3961}
1da177e4
LT
3962EXPORT_SYMBOL(default_wake_function);
3963
3964/*
41a2d6cf
IM
3965 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3966 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1da177e4
LT
3967 * number) then we wake all the non-exclusive tasks and one exclusive task.
3968 *
3969 * There are circumstances in which we can try to wake a task which has already
41a2d6cf 3970 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1da177e4
LT
3971 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3972 */
78ddb08f 3973static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
63859d4f 3974 int nr_exclusive, int wake_flags, void *key)
1da177e4 3975{
2e45874c 3976 wait_queue_t *curr, *next;
1da177e4 3977
2e45874c 3978 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
48f24c4d
IM
3979 unsigned flags = curr->flags;
3980
63859d4f 3981 if (curr->func(curr, mode, wake_flags, key) &&
48f24c4d 3982 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
1da177e4
LT
3983 break;
3984 }
3985}
3986
3987/**
3988 * __wake_up - wake up threads blocked on a waitqueue.
3989 * @q: the waitqueue
3990 * @mode: which threads
3991 * @nr_exclusive: how many wake-one or wake-many threads to wake up
67be2dd1 3992 * @key: is directly passed to the wakeup function
50fa610a
DH
3993 *
3994 * It may be assumed that this function implies a write memory barrier before
3995 * changing the task state if and only if any tasks are woken up.
1da177e4 3996 */
7ad5b3a5 3997void __wake_up(wait_queue_head_t *q, unsigned int mode,
95cdf3b7 3998 int nr_exclusive, void *key)
1da177e4
LT
3999{
4000 unsigned long flags;
4001
4002 spin_lock_irqsave(&q->lock, flags);
4003 __wake_up_common(q, mode, nr_exclusive, 0, key);
4004 spin_unlock_irqrestore(&q->lock, flags);
4005}
1da177e4
LT
4006EXPORT_SYMBOL(__wake_up);
4007
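/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * wake_up() and friends are thin wrappers around __wake_up().  The usual
 * pairing with wait_event() looks like:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	// hypothetical queue
 *	static int my_cond;			// hypothetical condition
 *
 *	// waiter: sleeps until my_cond becomes non-zero
 *	wait_event(my_wq, my_cond != 0);
 *
 *	// waker: make the condition true, then wake the queue
 *	my_cond = 1;
 *	wake_up(&my_wq);
 */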
4008/*
4009 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4010 */
7ad5b3a5 4011void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
1da177e4
LT
4012{
4013 __wake_up_common(q, mode, 1, 0, NULL);
4014}
22c43c81 4015EXPORT_SYMBOL_GPL(__wake_up_locked);
1da177e4 4016
4ede816a
DL
4017void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4018{
4019 __wake_up_common(q, mode, 1, 0, key);
4020}
4021
1da177e4 4022/**
4ede816a 4023 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
1da177e4
LT
4024 * @q: the waitqueue
4025 * @mode: which threads
4026 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4ede816a 4027 * @key: opaque value to be passed to wakeup targets
1da177e4
LT
4028 *
4029 * The sync wakeup differs in that the waker knows that it will schedule
4030 * away soon, so while the target thread will be woken up, it will not
4031 * be migrated to another CPU - ie. the two threads are 'synchronized'
4032 * with each other. This can prevent needless bouncing between CPUs.
4033 *
4034 * On UP it can prevent extra preemption.
50fa610a
DH
4035 *
4036 * It may be assumed that this function implies a write memory barrier before
4037 * changing the task state if and only if any tasks are woken up.
1da177e4 4038 */
4ede816a
DL
4039void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4040 int nr_exclusive, void *key)
1da177e4
LT
4041{
4042 unsigned long flags;
7d478721 4043 int wake_flags = WF_SYNC;
1da177e4
LT
4044
4045 if (unlikely(!q))
4046 return;
4047
4048 if (unlikely(!nr_exclusive))
7d478721 4049 wake_flags = 0;
1da177e4
LT
4050
4051 spin_lock_irqsave(&q->lock, flags);
7d478721 4052 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
1da177e4
LT
4053 spin_unlock_irqrestore(&q->lock, flags);
4054}
4ede816a
DL
4055EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4056
4057/*
4058 * __wake_up_sync - see __wake_up_sync_key()
4059 */
4060void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4061{
4062 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4063}
1da177e4
LT
4064EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4065
65eb3dc6
KD
4066/**
4067 * complete: - signals a single thread waiting on this completion
4068 * @x: holds the state of this particular completion
4069 *
4070 * This will wake up a single thread waiting on this completion. Threads will be
4071 * awakened in the same order in which they were queued.
4072 *
4073 * See also complete_all(), wait_for_completion() and related routines.
50fa610a
DH
4074 *
4075 * It may be assumed that this function implies a write memory barrier before
4076 * changing the task state if and only if any tasks are woken up.
65eb3dc6 4077 */
b15136e9 4078void complete(struct completion *x)
1da177e4
LT
4079{
4080 unsigned long flags;
4081
4082 spin_lock_irqsave(&x->wait.lock, flags);
4083 x->done++;
d9514f6c 4084 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
1da177e4
LT
4085 spin_unlock_irqrestore(&x->wait.lock, flags);
4086}
4087EXPORT_SYMBOL(complete);
4088
65eb3dc6
KD
4089/**
4090 * complete_all: - signals all threads waiting on this completion
4091 * @x: holds the state of this particular completion
4092 *
4093 * This will wake up all threads waiting on this particular completion event.
50fa610a
DH
4094 *
4095 * It may be assumed that this function implies a write memory barrier before
4096 * changing the task state if and only if any tasks are woken up.
65eb3dc6 4097 */
b15136e9 4098void complete_all(struct completion *x)
1da177e4
LT
4099{
4100 unsigned long flags;
4101
4102 spin_lock_irqsave(&x->wait.lock, flags);
4103 x->done += UINT_MAX/2;
d9514f6c 4104 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
1da177e4
LT
4105 spin_unlock_irqrestore(&x->wait.lock, flags);
4106}
4107EXPORT_SYMBOL(complete_all);
4108
8cbbe86d
AK
4109static inline long __sched
4110do_wait_for_common(struct completion *x, long timeout, int state)
1da177e4 4111{
1da177e4
LT
4112 if (!x->done) {
4113 DECLARE_WAITQUEUE(wait, current);
4114
a93d2f17 4115 __add_wait_queue_tail_exclusive(&x->wait, &wait);
1da177e4 4116 do {
94d3d824 4117 if (signal_pending_state(state, current)) {
ea71a546
ON
4118 timeout = -ERESTARTSYS;
4119 break;
8cbbe86d
AK
4120 }
4121 __set_current_state(state);
1da177e4
LT
4122 spin_unlock_irq(&x->wait.lock);
4123 timeout = schedule_timeout(timeout);
4124 spin_lock_irq(&x->wait.lock);
ea71a546 4125 } while (!x->done && timeout);
1da177e4 4126 __remove_wait_queue(&x->wait, &wait);
ea71a546
ON
4127 if (!x->done)
4128 return timeout;
1da177e4
LT
4129 }
4130 x->done--;
ea71a546 4131 return timeout ?: 1;
1da177e4 4132}
1da177e4 4133
8cbbe86d
AK
4134static long __sched
4135wait_for_common(struct completion *x, long timeout, int state)
1da177e4 4136{
1da177e4
LT
4137 might_sleep();
4138
4139 spin_lock_irq(&x->wait.lock);
8cbbe86d 4140 timeout = do_wait_for_common(x, timeout, state);
1da177e4 4141 spin_unlock_irq(&x->wait.lock);
8cbbe86d
AK
4142 return timeout;
4143}
1da177e4 4144
65eb3dc6
KD
4145/**
4146 * wait_for_completion: - waits for completion of a task
4147 * @x: holds the state of this particular completion
4148 *
4149 * This waits to be signaled for completion of a specific task. It is NOT
4150 * interruptible and there is no timeout.
4151 *
4152 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4153 * and interrupt capability. Also see complete().
4154 */
b15136e9 4155void __sched wait_for_completion(struct completion *x)
8cbbe86d
AK
4156{
4157 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
1da177e4 4158}
8cbbe86d 4159EXPORT_SYMBOL(wait_for_completion);
1da177e4 4160
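/*
 * Illustrative usage sketch (hypothetical caller, not from this file): a
 * completion lets one thread block until another signals it, e.g. waiting
 * for a helper thread to finish its setup:
 *
 *	struct completion setup_done;		// hypothetical
 *
 *	init_completion(&setup_done);
 *	// ... start the worker, passing it &setup_done ...
 *
 *	// worker, once ready:
 *	complete(&setup_done);
 *
 *	// caller, blocks uninterruptibly until complete() has run:
 *	wait_for_completion(&setup_done);
 */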
65eb3dc6
KD
4161/**
4162 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4163 * @x: holds the state of this particular completion
4164 * @timeout: timeout value in jiffies
4165 *
4166 * This waits for either a completion of a specific task to be signaled or for a
4167 * specified timeout to expire. The timeout is in jiffies. It is not
4168 * interruptible.
4169 */
b15136e9 4170unsigned long __sched
8cbbe86d 4171wait_for_completion_timeout(struct completion *x, unsigned long timeout)
1da177e4 4172{
8cbbe86d 4173 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
1da177e4 4174}
8cbbe86d 4175EXPORT_SYMBOL(wait_for_completion_timeout);
1da177e4 4176
65eb3dc6
KD
4177/**
4178 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4179 * @x: holds the state of this particular completion
4180 *
4181 * This waits for completion of a specific task to be signaled. It is
4182 * interruptible.
4183 */
8cbbe86d 4184int __sched wait_for_completion_interruptible(struct completion *x)
0fec171c 4185{
51e97990
AK
4186 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4187 if (t == -ERESTARTSYS)
4188 return t;
4189 return 0;
0fec171c 4190}
8cbbe86d 4191EXPORT_SYMBOL(wait_for_completion_interruptible);
1da177e4 4192
65eb3dc6
KD
4193/**
4194 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4195 * @x: holds the state of this particular completion
4196 * @timeout: timeout value in jiffies
4197 *
4198 * This waits for either a completion of a specific task to be signaled or for a
4199 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4200 */
b15136e9 4201unsigned long __sched
8cbbe86d
AK
4202wait_for_completion_interruptible_timeout(struct completion *x,
4203 unsigned long timeout)
0fec171c 4204{
8cbbe86d 4205 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
0fec171c 4206}
8cbbe86d 4207EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
1da177e4 4208
65eb3dc6
KD
4209/**
4210 * wait_for_completion_killable: - waits for completion of a task (killable)
4211 * @x: holds the state of this particular completion
4212 *
4213 * This waits to be signaled for completion of a specific task. It can be
4214 * interrupted by a kill signal.
4215 */
009e577e
MW
4216int __sched wait_for_completion_killable(struct completion *x)
4217{
4218 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4219 if (t == -ERESTARTSYS)
4220 return t;
4221 return 0;
4222}
4223EXPORT_SYMBOL(wait_for_completion_killable);
4224
0aa12fb4
SW
4225/**
4226 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4227 * @x: holds the state of this particular completion
4228 * @timeout: timeout value in jiffies
4229 *
4230 * This waits for either a completion of a specific task to be
4231 * signaled or for a specified timeout to expire. It can be
4232 * interrupted by a kill signal. The timeout is in jiffies.
4233 */
4234unsigned long __sched
4235wait_for_completion_killable_timeout(struct completion *x,
4236 unsigned long timeout)
4237{
4238 return wait_for_common(x, timeout, TASK_KILLABLE);
4239}
4240EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4241
be4de352
DC
4242/**
4243 * try_wait_for_completion - try to decrement a completion without blocking
4244 * @x: completion structure
4245 *
4246 * Returns: 0 if a decrement cannot be done without blocking
4247 * 1 if a decrement succeeded.
4248 *
4249 * If a completion is being used as a counting completion,
4250 * attempt to decrement the counter without blocking. This
4251 * enables us to avoid waiting if the resource the completion
4252 * is protecting is not available.
4253 */
4254bool try_wait_for_completion(struct completion *x)
4255{
7539a3b3 4256 unsigned long flags;
be4de352
DC
4257 int ret = 1;
4258
7539a3b3 4259 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
4260 if (!x->done)
4261 ret = 0;
4262 else
4263 x->done--;
7539a3b3 4264 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
4265 return ret;
4266}
4267EXPORT_SYMBOL(try_wait_for_completion);
4268
4269/**
4270 * completion_done - Test to see if a completion has any waiters
4271 * @x: completion structure
4272 *
4273 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4274 * 1 if there are no waiters.
4275 *
4276 */
4277bool completion_done(struct completion *x)
4278{
7539a3b3 4279 unsigned long flags;
be4de352
DC
4280 int ret = 1;
4281
7539a3b3 4282 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
4283 if (!x->done)
4284 ret = 0;
7539a3b3 4285 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
4286 return ret;
4287}
4288EXPORT_SYMBOL(completion_done);
4289
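/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * when a completion counts available items, try_wait_for_completion()
 * is a non-blocking "take one if available":
 *
 *	if (try_wait_for_completion(&resource_ready))	// hypothetical completion
 *		use_resource();				// hypothetical helper
 *	else
 *		defer_work();				// nothing available, don't block
 */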
8cbbe86d
AK
4290static long __sched
4291sleep_on_common(wait_queue_head_t *q, int state, long timeout)
1da177e4 4292{
0fec171c
IM
4293 unsigned long flags;
4294 wait_queue_t wait;
4295
4296 init_waitqueue_entry(&wait, current);
1da177e4 4297
8cbbe86d 4298 __set_current_state(state);
1da177e4 4299
8cbbe86d
AK
4300 spin_lock_irqsave(&q->lock, flags);
4301 __add_wait_queue(q, &wait);
4302 spin_unlock(&q->lock);
4303 timeout = schedule_timeout(timeout);
4304 spin_lock_irq(&q->lock);
4305 __remove_wait_queue(q, &wait);
4306 spin_unlock_irqrestore(&q->lock, flags);
4307
4308 return timeout;
4309}
4310
4311void __sched interruptible_sleep_on(wait_queue_head_t *q)
4312{
4313 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 4314}
1da177e4
LT
4315EXPORT_SYMBOL(interruptible_sleep_on);
4316
0fec171c 4317long __sched
95cdf3b7 4318interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 4319{
8cbbe86d 4320 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
1da177e4 4321}
1da177e4
LT
4322EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4323
0fec171c 4324void __sched sleep_on(wait_queue_head_t *q)
1da177e4 4325{
8cbbe86d 4326 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 4327}
1da177e4
LT
4328EXPORT_SYMBOL(sleep_on);
4329
0fec171c 4330long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 4331{
8cbbe86d 4332 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
1da177e4 4333}
1da177e4
LT
4334EXPORT_SYMBOL(sleep_on_timeout);
4335
b29739f9
IM
4336#ifdef CONFIG_RT_MUTEXES
4337
4338/*
4339 * rt_mutex_setprio - set the current priority of a task
4340 * @p: task
4341 * @prio: prio value (kernel-internal form)
4342 *
4343 * This function changes the 'effective' priority of a task. It does
4344 * not touch ->normal_prio like __setscheduler().
4345 *
4346 * Used by the rt_mutex code to implement priority inheritance logic.
4347 */
36c8b586 4348void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9
IM
4349{
4350 unsigned long flags;
83b699ed 4351 int oldprio, on_rq, running;
70b97a7f 4352 struct rq *rq;
83ab0aa0 4353 const struct sched_class *prev_class;
b29739f9
IM
4354
4355 BUG_ON(prio < 0 || prio > MAX_PRIO);
4356
4357 rq = task_rq_lock(p, &flags);
4358
d5f9f942 4359 oldprio = p->prio;
83ab0aa0 4360 prev_class = p->sched_class;
dd41f596 4361 on_rq = p->se.on_rq;
051a1d1a 4362 running = task_current(rq, p);
0e1f3483 4363 if (on_rq)
69be72c1 4364 dequeue_task(rq, p, 0);
0e1f3483
HS
4365 if (running)
4366 p->sched_class->put_prev_task(rq, p);
dd41f596
IM
4367
4368 if (rt_prio(prio))
4369 p->sched_class = &rt_sched_class;
4370 else
4371 p->sched_class = &fair_sched_class;
4372
b29739f9
IM
4373 p->prio = prio;
4374
0e1f3483
HS
4375 if (running)
4376 p->sched_class->set_curr_task(rq);
dd41f596 4377 if (on_rq) {
371fd7e7 4378 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
cb469845
SR
4379
4380 check_class_changed(rq, p, prev_class, oldprio, running);
b29739f9
IM
4381 }
4382 task_rq_unlock(rq, &flags);
4383}
4384
4385#endif
4386
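/*
 * Illustrative note (hypothetical scenario, not from this file): the
 * rt_mutex code calls rt_mutex_setprio() on a lock holder when a
 * higher-priority task blocks on the same rt_mutex, so the holder
 * temporarily runs with the waiter's priority until it unlocks:
 *
 *	rt_mutex_lock(&m);	// low-priority task takes the lock (m is hypothetical)
 *	...			// an RT task blocks on m -> holder is boosted here
 *	rt_mutex_unlock(&m);	// boost is undone, holder returns to its own priority
 */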
36c8b586 4387void set_user_nice(struct task_struct *p, long nice)
1da177e4 4388{
dd41f596 4389 int old_prio, delta, on_rq;
1da177e4 4390 unsigned long flags;
70b97a7f 4391 struct rq *rq;
1da177e4
LT
4392
4393 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4394 return;
4395 /*
4396 * We have to be careful: if called from sys_setpriority(),
4397 * the task might be in the middle of scheduling on another CPU.
4398 */
4399 rq = task_rq_lock(p, &flags);
4400 /*
4401 * The RT priorities are set via sched_setscheduler(), but we still
4402 * allow the 'normal' nice value to be set - but as expected
4403 * it won't have any effect on scheduling until the task is
dd41f596 4404 * SCHED_FIFO/SCHED_RR:
1da177e4 4405 */
e05606d3 4406 if (task_has_rt_policy(p)) {
1da177e4
LT
4407 p->static_prio = NICE_TO_PRIO(nice);
4408 goto out_unlock;
4409 }
dd41f596 4410 on_rq = p->se.on_rq;
c09595f6 4411 if (on_rq)
69be72c1 4412 dequeue_task(rq, p, 0);
1da177e4 4413
1da177e4 4414 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 4415 set_load_weight(p);
b29739f9
IM
4416 old_prio = p->prio;
4417 p->prio = effective_prio(p);
4418 delta = p->prio - old_prio;
1da177e4 4419
dd41f596 4420 if (on_rq) {
371fd7e7 4421 enqueue_task(rq, p, 0);
1da177e4 4422 /*
d5f9f942
AM
4423 * If the task increased its priority or is running and
4424 * lowered its priority, then reschedule its CPU:
1da177e4 4425 */
d5f9f942 4426 if (delta < 0 || (delta > 0 && task_running(rq, p)))
1da177e4
LT
4427 resched_task(rq->curr);
4428 }
4429out_unlock:
4430 task_rq_unlock(rq, &flags);
4431}
1da177e4
LT
4432EXPORT_SYMBOL(set_user_nice);
4433
e43379f1
MM
4434/*
4435 * can_nice - check if a task can reduce its nice value
4436 * @p: task
4437 * @nice: nice value
4438 */
36c8b586 4439int can_nice(const struct task_struct *p, const int nice)
e43379f1 4440{
024f4747
MM
4441 /* convert nice value [19,-20] to rlimit style value [1,40] */
4442 int nice_rlim = 20 - nice;
48f24c4d 4443
78d7d407 4444 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
4445 capable(CAP_SYS_NICE));
4446}
4447
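/*
 * Illustrative worked example (not from this file) of the conversion above:
 * nice_rlim = 20 - nice maps nice 19 -> 1 and nice -20 -> 40.  A task with
 * RLIMIT_NICE = 25 may therefore lower its nice value to 20 - 25 = -5
 * (nice_rlim 25 <= 25), but reaching -6 (nice_rlim 26) would need
 * CAP_SYS_NICE.
 */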
1da177e4
LT
4448#ifdef __ARCH_WANT_SYS_NICE
4449
4450/*
4451 * sys_nice - change the priority of the current process.
4452 * @increment: priority increment
4453 *
4454 * sys_setpriority is a more generic, but much slower function that
4455 * does similar things.
4456 */
5add95d4 4457SYSCALL_DEFINE1(nice, int, increment)
1da177e4 4458{
48f24c4d 4459 long nice, retval;
1da177e4
LT
4460
4461 /*
4462 * Setpriority might change our priority at the same moment.
4463 * We don't have to worry. Conceptually one call occurs first
4464 * and we have a single winner.
4465 */
e43379f1
MM
4466 if (increment < -40)
4467 increment = -40;
1da177e4
LT
4468 if (increment > 40)
4469 increment = 40;
4470
2b8f836f 4471 nice = TASK_NICE(current) + increment;
1da177e4
LT
4472 if (nice < -20)
4473 nice = -20;
4474 if (nice > 19)
4475 nice = 19;
4476
e43379f1
MM
4477 if (increment < 0 && !can_nice(current, nice))
4478 return -EPERM;
4479
1da177e4
LT
4480 retval = security_task_setnice(current, nice);
4481 if (retval)
4482 return retval;
4483
4484 set_user_nice(current, nice);
4485 return 0;
4486}
4487
4488#endif
4489
4490/**
4491 * task_prio - return the priority value of a given task.
4492 * @p: the task in question.
4493 *
4494 * This is the priority value as seen by users in /proc.
4495 * RT tasks are offset by -200. Normal tasks are centered
4496 * around 0, value goes from -16 to +15.
4497 */
36c8b586 4498int task_prio(const struct task_struct *p)
1da177e4
LT
4499{
4500 return p->prio - MAX_RT_PRIO;
4501}
4502
4503/**
4504 * task_nice - return the nice value of a given task.
4505 * @p: the task in question.
4506 */
36c8b586 4507int task_nice(const struct task_struct *p)
1da177e4
LT
4508{
4509 return TASK_NICE(p);
4510}
150d8bed 4511EXPORT_SYMBOL(task_nice);
1da177e4
LT
4512
4513/**
4514 * idle_cpu - is a given cpu idle currently?
4515 * @cpu: the processor in question.
4516 */
4517int idle_cpu(int cpu)
4518{
4519 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4520}
4521
1da177e4
LT
4522/**
4523 * idle_task - return the idle task for a given cpu.
4524 * @cpu: the processor in question.
4525 */
36c8b586 4526struct task_struct *idle_task(int cpu)
1da177e4
LT
4527{
4528 return cpu_rq(cpu)->idle;
4529}
4530
4531/**
4532 * find_process_by_pid - find a process with a matching PID value.
4533 * @pid: the pid in question.
4534 */
a9957449 4535static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 4536{
228ebcbe 4537 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
4538}
4539
4540/* Actually do priority change: must hold rq lock. */
dd41f596
IM
4541static void
4542__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
1da177e4 4543{
dd41f596 4544 BUG_ON(p->se.on_rq);
48f24c4d 4545
1da177e4
LT
4546 p->policy = policy;
4547 p->rt_priority = prio;
b29739f9
IM
4548 p->normal_prio = normal_prio(p);
4549 /* we are holding p->pi_lock already */
4550 p->prio = rt_mutex_getprio(p);
ffd44db5
PZ
4551 if (rt_prio(p->prio))
4552 p->sched_class = &rt_sched_class;
4553 else
4554 p->sched_class = &fair_sched_class;
2dd73a4f 4555 set_load_weight(p);
1da177e4
LT
4556}
4557
c69e8d9c
DH
4558/*
4559 * check the target process has a UID that matches the current process's
4560 */
4561static bool check_same_owner(struct task_struct *p)
4562{
4563 const struct cred *cred = current_cred(), *pcred;
4564 bool match;
4565
4566 rcu_read_lock();
4567 pcred = __task_cred(p);
4568 match = (cred->euid == pcred->euid ||
4569 cred->euid == pcred->uid);
4570 rcu_read_unlock();
4571 return match;
4572}
4573
961ccddd
RR
4574static int __sched_setscheduler(struct task_struct *p, int policy,
4575 struct sched_param *param, bool user)
1da177e4 4576{
83b699ed 4577 int retval, oldprio, oldpolicy = -1, on_rq, running;
1da177e4 4578 unsigned long flags;
83ab0aa0 4579 const struct sched_class *prev_class;
70b97a7f 4580 struct rq *rq;
ca94c442 4581 int reset_on_fork;
1da177e4 4582
66e5393a
SR
4583 /* may grab non-irq protected spin_locks */
4584 BUG_ON(in_interrupt());
1da177e4
LT
4585recheck:
4586 /* double check policy once rq lock held */
ca94c442
LP
4587 if (policy < 0) {
4588 reset_on_fork = p->sched_reset_on_fork;
1da177e4 4589 policy = oldpolicy = p->policy;
ca94c442
LP
4590 } else {
4591 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4592 policy &= ~SCHED_RESET_ON_FORK;
4593
4594 if (policy != SCHED_FIFO && policy != SCHED_RR &&
4595 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4596 policy != SCHED_IDLE)
4597 return -EINVAL;
4598 }
4599
1da177e4
LT
4600 /*
4601 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
4602 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4603 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4
LT
4604 */
4605 if (param->sched_priority < 0 ||
95cdf3b7 4606 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
d46523ea 4607 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
1da177e4 4608 return -EINVAL;
e05606d3 4609 if (rt_policy(policy) != (param->sched_priority != 0))
1da177e4
LT
4610 return -EINVAL;
4611
37e4ab3f
OC
4612 /*
4613 * Allow unprivileged RT tasks to decrease priority:
4614 */
961ccddd 4615 if (user && !capable(CAP_SYS_NICE)) {
e05606d3 4616 if (rt_policy(policy)) {
a44702e8
ON
4617 unsigned long rlim_rtprio =
4618 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
4619
4620 /* can't set/change the rt policy */
4621 if (policy != p->policy && !rlim_rtprio)
4622 return -EPERM;
4623
4624 /* can't increase priority */
4625 if (param->sched_priority > p->rt_priority &&
4626 param->sched_priority > rlim_rtprio)
4627 return -EPERM;
4628 }
dd41f596
IM
4629 /*
4630 * Like positive nice levels, don't allow tasks to
4631 * move out of SCHED_IDLE either:
4632 */
4633 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
4634 return -EPERM;
5fe1d75f 4635
37e4ab3f 4636 /* can't change other user's priorities */
c69e8d9c 4637 if (!check_same_owner(p))
37e4ab3f 4638 return -EPERM;
ca94c442
LP
4639
4640 /* Normal users shall not reset the sched_reset_on_fork flag */
4641 if (p->sched_reset_on_fork && !reset_on_fork)
4642 return -EPERM;
37e4ab3f 4643 }
1da177e4 4644
725aad24 4645 if (user) {
725aad24
JF
4646 retval = security_task_setscheduler(p, policy, param);
4647 if (retval)
4648 return retval;
4649 }
4650
b29739f9
IM
4651 /*
4652 * make sure no PI-waiters arrive (or leave) while we are
4653 * changing the priority of the task:
4654 */
1d615482 4655 raw_spin_lock_irqsave(&p->pi_lock, flags);
1da177e4
LT
4656 /*
4657 * To be able to change p->policy safely, the appropriate
4658 * runqueue lock must be held.
4659 */
b29739f9 4660 rq = __task_rq_lock(p);
dc61b1d6
PZ
4661
4662#ifdef CONFIG_RT_GROUP_SCHED
4663 if (user) {
4664 /*
4665 * Do not allow realtime tasks into groups that have no runtime
4666 * assigned.
4667 */
4668 if (rt_bandwidth_enabled() && rt_policy(policy) &&
4669 task_group(p)->rt_bandwidth.rt_runtime == 0) {
4670 __task_rq_unlock(rq);
4671 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4672 return -EPERM;
4673 }
4674 }
4675#endif
4676
1da177e4
LT
4677 /* recheck policy now with rq lock held */
4678 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4679 policy = oldpolicy = -1;
b29739f9 4680 __task_rq_unlock(rq);
1d615482 4681 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
4682 goto recheck;
4683 }
dd41f596 4684 on_rq = p->se.on_rq;
051a1d1a 4685 running = task_current(rq, p);
0e1f3483 4686 if (on_rq)
2e1cb74a 4687 deactivate_task(rq, p, 0);
0e1f3483
HS
4688 if (running)
4689 p->sched_class->put_prev_task(rq, p);
f6b53205 4690
ca94c442
LP
4691 p->sched_reset_on_fork = reset_on_fork;
4692
1da177e4 4693 oldprio = p->prio;
83ab0aa0 4694 prev_class = p->sched_class;
dd41f596 4695 __setscheduler(rq, p, policy, param->sched_priority);
f6b53205 4696
0e1f3483
HS
4697 if (running)
4698 p->sched_class->set_curr_task(rq);
dd41f596
IM
4699 if (on_rq) {
4700 activate_task(rq, p, 0);
cb469845
SR
4701
4702 check_class_changed(rq, p, prev_class, oldprio, running);
1da177e4 4703 }
b29739f9 4704 __task_rq_unlock(rq);
1d615482 4705 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
b29739f9 4706
95e02ca9
TG
4707 rt_mutex_adjust_pi(p);
4708
1da177e4
LT
4709 return 0;
4710}
961ccddd
RR
4711
4712/**
4713 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4714 * @p: the task in question.
4715 * @policy: new policy.
4716 * @param: structure containing the new RT priority.
4717 *
4718 * NOTE that the task may be already dead.
4719 */
4720int sched_setscheduler(struct task_struct *p, int policy,
4721 struct sched_param *param)
4722{
4723 return __sched_setscheduler(p, policy, param, true);
4724}
1da177e4
LT
4725EXPORT_SYMBOL_GPL(sched_setscheduler);
4726
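/*
 * Illustrative usage sketch (hypothetical in-kernel caller, not from this
 * file): promoting a kernel thread to SCHED_FIFO priority 50:
 *
 *	struct sched_param sp = { .sched_priority = 50 };	// hypothetical value
 *	int ret;
 *
 *	ret = sched_setscheduler(my_kthread, SCHED_FIFO, &sp);	// my_kthread is hypothetical
 *	if (ret)
 *		printk(KERN_WARNING "could not switch thread to RT: %d\n", ret);
 *
 * Callers that must bypass the RLIMIT_RTPRIO/capability checks use
 * sched_setscheduler_nocheck() instead, described below.
 */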
961ccddd
RR
4727/**
4728 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4729 * @p: the task in question.
4730 * @policy: new policy.
4731 * @param: structure containing the new RT priority.
4732 *
4733 * Just like sched_setscheduler, only don't bother checking if the
4734 * current context has permission. For example, this is needed in
4735 * stop_machine(): we create temporary high priority worker threads,
4736 * but our caller might not have that capability.
4737 */
4738int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4739 struct sched_param *param)
4740{
4741 return __sched_setscheduler(p, policy, param, false);
4742}
4743
95cdf3b7
IM
4744static int
4745do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 4746{
1da177e4
LT
4747 struct sched_param lparam;
4748 struct task_struct *p;
36c8b586 4749 int retval;
1da177e4
LT
4750
4751 if (!param || pid < 0)
4752 return -EINVAL;
4753 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4754 return -EFAULT;
5fe1d75f
ON
4755
4756 rcu_read_lock();
4757 retval = -ESRCH;
1da177e4 4758 p = find_process_by_pid(pid);
5fe1d75f
ON
4759 if (p != NULL)
4760 retval = sched_setscheduler(p, policy, &lparam);
4761 rcu_read_unlock();
36c8b586 4762
1da177e4
LT
4763 return retval;
4764}
4765
4766/**
4767 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4768 * @pid: the pid in question.
4769 * @policy: new policy.
4770 * @param: structure containing the new RT priority.
4771 */
5add95d4
HC
4772SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4773 struct sched_param __user *, param)
1da177e4 4774{
c21761f1
JB
4775 /* negative values for policy are not valid */
4776 if (policy < 0)
4777 return -EINVAL;
4778
1da177e4
LT
4779 return do_sched_setscheduler(pid, policy, param);
4780}
4781
4782/**
4783 * sys_sched_setparam - set/change the RT priority of a thread
4784 * @pid: the pid in question.
4785 * @param: structure containing the new RT priority.
4786 */
5add95d4 4787SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
4788{
4789 return do_sched_setscheduler(pid, -1, param);
4790}
4791
4792/**
4793 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4794 * @pid: the pid in question.
4795 */
5add95d4 4796SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 4797{
36c8b586 4798 struct task_struct *p;
3a5c359a 4799 int retval;
1da177e4
LT
4800
4801 if (pid < 0)
3a5c359a 4802 return -EINVAL;
1da177e4
LT
4803
4804 retval = -ESRCH;
5fe85be0 4805 rcu_read_lock();
1da177e4
LT
4806 p = find_process_by_pid(pid);
4807 if (p) {
4808 retval = security_task_getscheduler(p);
4809 if (!retval)
ca94c442
LP
4810 retval = p->policy
4811 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 4812 }
5fe85be0 4813 rcu_read_unlock();
1da177e4
LT
4814 return retval;
4815}
4816
4817/**
ca94c442 4818 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
4819 * @pid: the pid in question.
4820 * @param: structure containing the RT priority.
4821 */
5add95d4 4822SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
4823{
4824 struct sched_param lp;
36c8b586 4825 struct task_struct *p;
3a5c359a 4826 int retval;
1da177e4
LT
4827
4828 if (!param || pid < 0)
3a5c359a 4829 return -EINVAL;
1da177e4 4830
5fe85be0 4831 rcu_read_lock();
1da177e4
LT
4832 p = find_process_by_pid(pid);
4833 retval = -ESRCH;
4834 if (!p)
4835 goto out_unlock;
4836
4837 retval = security_task_getscheduler(p);
4838 if (retval)
4839 goto out_unlock;
4840
4841 lp.sched_priority = p->rt_priority;
5fe85be0 4842 rcu_read_unlock();
1da177e4
LT
4843
4844 /*
4845 * This one might sleep; we cannot do it with a spinlock held ...
4846 */
4847 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4848
1da177e4
LT
4849 return retval;
4850
4851out_unlock:
5fe85be0 4852 rcu_read_unlock();
1da177e4
LT
4853 return retval;
4854}
4855
96f874e2 4856long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 4857{
5a16f3d3 4858 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
4859 struct task_struct *p;
4860 int retval;
1da177e4 4861
95402b38 4862 get_online_cpus();
23f5d142 4863 rcu_read_lock();
1da177e4
LT
4864
4865 p = find_process_by_pid(pid);
4866 if (!p) {
23f5d142 4867 rcu_read_unlock();
95402b38 4868 put_online_cpus();
1da177e4
LT
4869 return -ESRCH;
4870 }
4871
23f5d142 4872 /* Prevent p going away */
1da177e4 4873 get_task_struct(p);
23f5d142 4874 rcu_read_unlock();
1da177e4 4875
5a16f3d3
RR
4876 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4877 retval = -ENOMEM;
4878 goto out_put_task;
4879 }
4880 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4881 retval = -ENOMEM;
4882 goto out_free_cpus_allowed;
4883 }
1da177e4 4884 retval = -EPERM;
c69e8d9c 4885 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
1da177e4
LT
4886 goto out_unlock;
4887
e7834f8f
DQ
4888 retval = security_task_setscheduler(p, 0, NULL);
4889 if (retval)
4890 goto out_unlock;
4891
5a16f3d3
RR
4892 cpuset_cpus_allowed(p, cpus_allowed);
4893 cpumask_and(new_mask, in_mask, cpus_allowed);
8707d8b8 4894 again:
5a16f3d3 4895 retval = set_cpus_allowed_ptr(p, new_mask);
1da177e4 4896
8707d8b8 4897 if (!retval) {
5a16f3d3
RR
4898 cpuset_cpus_allowed(p, cpus_allowed);
4899 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
4900 /*
4901 * We must have raced with a concurrent cpuset
4902 * update. Just reset the cpus_allowed to the
4903 * cpuset's cpus_allowed
4904 */
5a16f3d3 4905 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
4906 goto again;
4907 }
4908 }
1da177e4 4909out_unlock:
5a16f3d3
RR
4910 free_cpumask_var(new_mask);
4911out_free_cpus_allowed:
4912 free_cpumask_var(cpus_allowed);
4913out_put_task:
1da177e4 4914 put_task_struct(p);
95402b38 4915 put_online_cpus();
1da177e4
LT
4916 return retval;
4917}
4918
4919static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 4920 struct cpumask *new_mask)
1da177e4 4921{
96f874e2
RR
4922 if (len < cpumask_size())
4923 cpumask_clear(new_mask);
4924 else if (len > cpumask_size())
4925 len = cpumask_size();
4926
1da177e4
LT
4927 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4928}
4929
4930/**
4931 * sys_sched_setaffinity - set the cpu affinity of a process
4932 * @pid: pid of the process
4933 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4934 * @user_mask_ptr: user-space pointer to the new cpu mask
4935 */
5add95d4
HC
4936SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4937 unsigned long __user *, user_mask_ptr)
1da177e4 4938{
5a16f3d3 4939 cpumask_var_t new_mask;
1da177e4
LT
4940 int retval;
4941
5a16f3d3
RR
4942 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4943 return -ENOMEM;
1da177e4 4944
5a16f3d3
RR
4945 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4946 if (retval == 0)
4947 retval = sched_setaffinity(pid, new_mask);
4948 free_cpumask_var(new_mask);
4949 return retval;
1da177e4
LT
4950}
4951
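/*
 * Illustrative usage sketch (hypothetical in-kernel caller, not from this
 * file): pinning a task to CPU 2 via the helper above:
 *
 *	cpumask_var_t mask;
 *	int ret;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(2, mask);
 *	ret = sched_setaffinity(task_pid_vnr(p), mask);	// p is a hypothetical task
 *	free_cpumask_var(mask);
 *
 * Userspace reaches the same path through the sched_setaffinity(2) syscall
 * defined just above.
 */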
96f874e2 4952long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 4953{
36c8b586 4954 struct task_struct *p;
31605683
TG
4955 unsigned long flags;
4956 struct rq *rq;
1da177e4 4957 int retval;
1da177e4 4958
95402b38 4959 get_online_cpus();
23f5d142 4960 rcu_read_lock();
1da177e4
LT
4961
4962 retval = -ESRCH;
4963 p = find_process_by_pid(pid);
4964 if (!p)
4965 goto out_unlock;
4966
e7834f8f
DQ
4967 retval = security_task_getscheduler(p);
4968 if (retval)
4969 goto out_unlock;
4970
31605683 4971 rq = task_rq_lock(p, &flags);
96f874e2 4972 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
31605683 4973 task_rq_unlock(rq, &flags);
1da177e4
LT
4974
4975out_unlock:
23f5d142 4976 rcu_read_unlock();
95402b38 4977 put_online_cpus();
1da177e4 4978
9531b62f 4979 return retval;
1da177e4
LT
4980}
4981
4982/**
4983 * sys_sched_getaffinity - get the cpu affinity of a process
4984 * @pid: pid of the process
4985 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4986 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4987 */
5add95d4
HC
4988SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4989 unsigned long __user *, user_mask_ptr)
1da177e4
LT
4990{
4991 int ret;
f17c8607 4992 cpumask_var_t mask;
1da177e4 4993
84fba5ec 4994 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
4995 return -EINVAL;
4996 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
4997 return -EINVAL;
4998
f17c8607
RR
4999 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5000 return -ENOMEM;
1da177e4 5001
f17c8607
RR
5002 ret = sched_getaffinity(pid, mask);
5003 if (ret == 0) {
8bc037fb 5004 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
5005
5006 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
5007 ret = -EFAULT;
5008 else
cd3d8031 5009 ret = retlen;
f17c8607
RR
5010 }
5011 free_cpumask_var(mask);
1da177e4 5012
f17c8607 5013 return ret;
1da177e4
LT
5014}
5015
5016/**
5017 * sys_sched_yield - yield the current processor to other threads.
5018 *
dd41f596
IM
5019 * This function yields the current CPU to other tasks. If there are no
5020 * other threads running on this CPU then this function will return.
1da177e4 5021 */
5add95d4 5022SYSCALL_DEFINE0(sched_yield)
1da177e4 5023{
70b97a7f 5024 struct rq *rq = this_rq_lock();
1da177e4 5025
2d72376b 5026 schedstat_inc(rq, yld_count);
4530d7ab 5027 current->sched_class->yield_task(rq);
1da177e4
LT
5028
5029 /*
5030 * Since we are going to call schedule() anyway, there's
5031 * no need to preempt or enable interrupts:
5032 */
5033 __release(rq->lock);
8a25d5de 5034 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 5035 do_raw_spin_unlock(&rq->lock);
1da177e4
LT
5036 preempt_enable_no_resched();
5037
5038 schedule();
5039
5040 return 0;
5041}
5042
d86ee480
PZ
5043static inline int should_resched(void)
5044{
5045 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5046}
5047
e7b38404 5048static void __cond_resched(void)
1da177e4 5049{
e7aaaa69
FW
5050 add_preempt_count(PREEMPT_ACTIVE);
5051 schedule();
5052 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4
LT
5053}
5054
02b67cc3 5055int __sched _cond_resched(void)
1da177e4 5056{
d86ee480 5057 if (should_resched()) {
1da177e4
LT
5058 __cond_resched();
5059 return 1;
5060 }
5061 return 0;
5062}
02b67cc3 5063EXPORT_SYMBOL(_cond_resched);
1da177e4
LT
5064
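/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * long kernel-side loops call cond_resched() so that, on non-preemptible
 * kernels, other runnable tasks still get the CPU:
 *
 *	list_for_each_entry(obj, &big_list, node) {	// obj/big_list are hypothetical
 *		process(obj);				// hypothetical work
 *		cond_resched();		// reschedules only if a resched is pending
 *	}
 */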
5065/*
613afbf8 5066 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
5067 * call schedule, and on return reacquire the lock.
5068 *
41a2d6cf 5069 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
5070 * operations here to prevent schedule() from being called twice (once via
5071 * spin_unlock(), once by hand).
5072 */
613afbf8 5073int __cond_resched_lock(spinlock_t *lock)
1da177e4 5074{
d86ee480 5075 int resched = should_resched();
6df3cecb
JK
5076 int ret = 0;
5077
f607c668
PZ
5078 lockdep_assert_held(lock);
5079
95c354fe 5080 if (spin_needbreak(lock) || resched) {
1da177e4 5081 spin_unlock(lock);
d86ee480 5082 if (resched)
95c354fe
NP
5083 __cond_resched();
5084 else
5085 cpu_relax();
6df3cecb 5086 ret = 1;
1da177e4 5087 spin_lock(lock);
1da177e4 5088 }
6df3cecb 5089 return ret;
1da177e4 5090}
613afbf8 5091EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 5092
613afbf8 5093int __sched __cond_resched_softirq(void)
1da177e4
LT
5094{
5095 BUG_ON(!in_softirq());
5096
d86ee480 5097 if (should_resched()) {
98d82567 5098 local_bh_enable();
1da177e4
LT
5099 __cond_resched();
5100 local_bh_disable();
5101 return 1;
5102 }
5103 return 0;
5104}
613afbf8 5105EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 5106
1da177e4
LT
5107/**
5108 * yield - yield the current processor to other threads.
5109 *
72fd4a35 5110 * This is a shortcut for kernel-space yielding - it marks the
1da177e4
LT
5111 * thread runnable and calls sys_sched_yield().
5112 */
5113void __sched yield(void)
5114{
5115 set_current_state(TASK_RUNNING);
5116 sys_sched_yield();
5117}
1da177e4
LT
5118EXPORT_SYMBOL(yield);
5119
5120/*
41a2d6cf 5121 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 5122 * that process accounting knows that this is a task in IO wait state.
1da177e4
LT
5123 */
5124void __sched io_schedule(void)
5125{
54d35f29 5126 struct rq *rq = raw_rq();
1da177e4 5127
0ff92245 5128 delayacct_blkio_start();
1da177e4 5129 atomic_inc(&rq->nr_iowait);
8f0dfc34 5130 current->in_iowait = 1;
1da177e4 5131 schedule();
8f0dfc34 5132 current->in_iowait = 0;
1da177e4 5133 atomic_dec(&rq->nr_iowait);
0ff92245 5134 delayacct_blkio_end();
1da177e4 5135}
1da177e4
LT
5136EXPORT_SYMBOL(io_schedule);
5137
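/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * block-layer style code sleeps with io_schedule() instead of schedule()
 * so the time is accounted as iowait and picked up by delay accounting:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&req_wq, &wait, TASK_UNINTERRUPTIBLE);	// req_wq is hypothetical
 *		if (io_done(req))	// hypothetical completion check
 *			break;
 *		io_schedule();
 *	}
 *	finish_wait(&req_wq, &wait);
 */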
5138long __sched io_schedule_timeout(long timeout)
5139{
54d35f29 5140 struct rq *rq = raw_rq();
1da177e4
LT
5141 long ret;
5142
0ff92245 5143 delayacct_blkio_start();
1da177e4 5144 atomic_inc(&rq->nr_iowait);
8f0dfc34 5145 current->in_iowait = 1;
1da177e4 5146 ret = schedule_timeout(timeout);
8f0dfc34 5147 current->in_iowait = 0;
1da177e4 5148 atomic_dec(&rq->nr_iowait);
0ff92245 5149 delayacct_blkio_end();
1da177e4
LT
5150 return ret;
5151}
5152
5153/**
5154 * sys_sched_get_priority_max - return maximum RT priority.
5155 * @policy: scheduling class.
5156 *
5157 * this syscall returns the maximum rt_priority that can be used
5158 * by a given scheduling class.
5159 */
5add95d4 5160SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
5161{
5162 int ret = -EINVAL;
5163
5164 switch (policy) {
5165 case SCHED_FIFO:
5166 case SCHED_RR:
5167 ret = MAX_USER_RT_PRIO-1;
5168 break;
5169 case SCHED_NORMAL:
b0a9499c 5170 case SCHED_BATCH:
dd41f596 5171 case SCHED_IDLE:
1da177e4
LT
5172 ret = 0;
5173 break;
5174 }
5175 return ret;
5176}
5177
5178/**
5179 * sys_sched_get_priority_min - return minimum RT priority.
5180 * @policy: scheduling class.
5181 *
5182 * this syscall returns the minimum rt_priority that can be used
5183 * by a given scheduling class.
5184 */
5add95d4 5185SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
5186{
5187 int ret = -EINVAL;
5188
5189 switch (policy) {
5190 case SCHED_FIFO:
5191 case SCHED_RR:
5192 ret = 1;
5193 break;
5194 case SCHED_NORMAL:
b0a9499c 5195 case SCHED_BATCH:
dd41f596 5196 case SCHED_IDLE:
1da177e4
LT
5197 ret = 0;
5198 }
5199 return ret;
5200}
5201
5202/**
5203 * sys_sched_rr_get_interval - return the default timeslice of a process.
5204 * @pid: pid of the process.
5205 * @interval: userspace pointer to the timeslice value.
5206 *
5207 * this syscall writes the default timeslice value of a given process
5208 * into the user-space timespec buffer. A value of '0' means infinity.
5209 */
17da2bd9 5210SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 5211 struct timespec __user *, interval)
1da177e4 5212{
36c8b586 5213 struct task_struct *p;
a4ec24b4 5214 unsigned int time_slice;
dba091b9
TG
5215 unsigned long flags;
5216 struct rq *rq;
3a5c359a 5217 int retval;
1da177e4 5218 struct timespec t;
1da177e4
LT
5219
5220 if (pid < 0)
3a5c359a 5221 return -EINVAL;
1da177e4
LT
5222
5223 retval = -ESRCH;
1a551ae7 5224 rcu_read_lock();
1da177e4
LT
5225 p = find_process_by_pid(pid);
5226 if (!p)
5227 goto out_unlock;
5228
5229 retval = security_task_getscheduler(p);
5230 if (retval)
5231 goto out_unlock;
5232
dba091b9
TG
5233 rq = task_rq_lock(p, &flags);
5234 time_slice = p->sched_class->get_rr_interval(rq, p);
5235 task_rq_unlock(rq, &flags);
a4ec24b4 5236
1a551ae7 5237 rcu_read_unlock();
a4ec24b4 5238 jiffies_to_timespec(time_slice, &t);
1da177e4 5239 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 5240 return retval;
3a5c359a 5241
1da177e4 5242out_unlock:
1a551ae7 5243 rcu_read_unlock();
1da177e4
LT
5244 return retval;
5245}
5246
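/*
 * Illustrative usage sketch (hypothetical userspace caller, not from this
 * file):
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)		// 0 = calling thread
 *		printf("timeslice: %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
 *
 * For a SCHED_RR task this reports the round-robin timeslice; for other
 * policies the value comes from the class's get_rr_interval() hook.
 */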
7c731e0a 5247static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 5248
82a1fcb9 5249void sched_show_task(struct task_struct *p)
1da177e4 5250{
1da177e4 5251 unsigned long free = 0;
36c8b586 5252 unsigned state;
1da177e4 5253
1da177e4 5254 state = p->state ? __ffs(p->state) + 1 : 0;
3df0fc5b 5255 printk(KERN_INFO "%-13.13s %c", p->comm,
2ed6e34f 5256 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 5257#if BITS_PER_LONG == 32
1da177e4 5258 if (state == TASK_RUNNING)
3df0fc5b 5259 printk(KERN_CONT " running ");
1da177e4 5260 else
3df0fc5b 5261 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
5262#else
5263 if (state == TASK_RUNNING)
3df0fc5b 5264 printk(KERN_CONT " running task ");
1da177e4 5265 else
3df0fc5b 5266 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
5267#endif
5268#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 5269 free = stack_not_used(p);
1da177e4 5270#endif
3df0fc5b 5271 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
aa47b7e0
DR
5272 task_pid_nr(p), task_pid_nr(p->real_parent),
5273 (unsigned long)task_thread_info(p)->flags);
1da177e4 5274
5fb5e6de 5275 show_stack(p, NULL);
1da177e4
LT
5276}
5277
e59e2ae2 5278void show_state_filter(unsigned long state_filter)
1da177e4 5279{
36c8b586 5280 struct task_struct *g, *p;
1da177e4 5281
4bd77321 5282#if BITS_PER_LONG == 32
3df0fc5b
PZ
5283 printk(KERN_INFO
5284 " task PC stack pid father\n");
1da177e4 5285#else
3df0fc5b
PZ
5286 printk(KERN_INFO
5287 " task PC stack pid father\n");
1da177e4
LT
5288#endif
5289 read_lock(&tasklist_lock);
5290 do_each_thread(g, p) {
5291 /*
5292 * reset the NMI-timeout, listing all files on a slow
5293 * console might take a lot of time:
5294 */
5295 touch_nmi_watchdog();
39bc89fd 5296 if (!state_filter || (p->state & state_filter))
82a1fcb9 5297 sched_show_task(p);
1da177e4
LT
5298 } while_each_thread(g, p);
5299
04c9167f
JF
5300 touch_all_softlockup_watchdogs();
5301
dd41f596
IM
5302#ifdef CONFIG_SCHED_DEBUG
5303 sysrq_sched_debug_show();
5304#endif
1da177e4 5305 read_unlock(&tasklist_lock);
e59e2ae2
IM
5306 /*
5307 * Only show locks if all tasks are dumped:
5308 */
93335a21 5309 if (!state_filter)
e59e2ae2 5310 debug_show_all_locks();
1da177e4
LT
5311}
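Most callers reach this through the show_state() wrapper in <linux/sched.h>, which is simply show_state_filter(0); the SysRq 't' handler, for example, uses that to dump every task regardless of state.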
5312
1df21055
IM
5313void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5314{
dd41f596 5315 idle->sched_class = &idle_sched_class;
1df21055
IM
5316}
5317
f340c0d1
IM
5318/**
5319 * init_idle - set up an idle thread for a given CPU
5320 * @idle: task in question
5321 * @cpu: cpu the idle task belongs to
5322 *
5323 * NOTE: this function does not set the idle thread's NEED_RESCHED
5324 * flag, to make booting more robust.
5325 */
5c1e1767 5326void __cpuinit init_idle(struct task_struct *idle, int cpu)
1da177e4 5327{
70b97a7f 5328 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
5329 unsigned long flags;
5330
05fa785c 5331 raw_spin_lock_irqsave(&rq->lock, flags);
5cbd54ef 5332
dd41f596 5333 __sched_fork(idle);
06b83b5f 5334 idle->state = TASK_RUNNING;
dd41f596
IM
5335 idle->se.exec_start = sched_clock();
5336
96f874e2 5337 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
dd41f596 5338 __set_task_cpu(idle, cpu);
1da177e4 5339
1da177e4 5340 rq->curr = rq->idle = idle;
4866cde0
NP
5341#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5342 idle->oncpu = 1;
5343#endif
05fa785c 5344 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4
LT
5345
5346 /* Set the preempt count _outside_ the spinlocks! */
8e3e076c
LT
5347#if defined(CONFIG_PREEMPT)
5348 task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5349#else
a1261f54 5350 task_thread_info(idle)->preempt_count = 0;
8e3e076c 5351#endif
dd41f596
IM
5352 /*
5353 * The idle tasks have their own, simple scheduling class:
5354 */
5355 idle->sched_class = &idle_sched_class;
fb52607a 5356 ftrace_graph_init_task(idle);
1da177e4
LT
5357}
5358
5359/*
5360 * In a system that switches off the HZ timer nohz_cpu_mask
5361 * indicates which cpus entered this state. This is used
5362 * in the rcu update to wait only for active cpus. For systems
5363 * which do not switch off the HZ timer nohz_cpu_mask should
6a7b3dc3 5364 * always be CPU_BITS_NONE.
1da177e4 5365 */
6a7b3dc3 5366cpumask_var_t nohz_cpu_mask;
1da177e4 5367
19978ca6
IM
5368/*
5369 * Increase the granularity value when there are more CPUs,
5370 * because with more CPUs the 'effective latency' as visible
5371 * to users decreases. But the relationship is not linear,
5372 * so pick a second-best guess by going with the log2 of the
5373 * number of CPUs.
5374 *
5375 * This idea comes from the SD scheduler of Con Kolivas:
5376 */
acb4a848 5377static int get_update_sysctl_factor(void)
19978ca6 5378{
4ca3ef71 5379 unsigned int cpus = min_t(int, num_online_cpus(), 8);
1983a922
CE
5380 unsigned int factor;
5381
5382 switch (sysctl_sched_tunable_scaling) {
5383 case SCHED_TUNABLESCALING_NONE:
5384 factor = 1;
5385 break;
5386 case SCHED_TUNABLESCALING_LINEAR:
5387 factor = cpus;
5388 break;
5389 case SCHED_TUNABLESCALING_LOG:
5390 default:
5391 factor = 1 + ilog2(cpus);
5392 break;
5393 }
19978ca6 5394
acb4a848
CE
5395 return factor;
5396}
19978ca6 5397
acb4a848
CE
5398static void update_sysctl(void)
5399{
5400 unsigned int factor = get_update_sysctl_factor();
19978ca6 5401
0bcdcf28
CE
5402#define SET_SYSCTL(name) \
5403 (sysctl_##name = (factor) * normalized_sysctl_##name)
5404 SET_SYSCTL(sched_min_granularity);
5405 SET_SYSCTL(sched_latency);
5406 SET_SYSCTL(sched_wakeup_granularity);
5407 SET_SYSCTL(sched_shares_ratelimit);
5408#undef SET_SYSCTL
5409}
55cd5340 5410
0bcdcf28
CE
5411static inline void sched_init_granularity(void)
5412{
5413 update_sysctl();
19978ca6
IM
5414}
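Worked example for the default SCHED_TUNABLESCALING_LOG case: on a 4-CPU machine the factor is 1 + ilog2(min(4, 8)) = 3, so a normalized sched_latency of 6 ms (the usual default; treat the exact value as an assumption) is scaled to 18 ms, and with 8 or more online CPUs the factor tops out at 1 + ilog2(8) = 4.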
5415
1da177e4
LT
5416#ifdef CONFIG_SMP
5417/*
5418 * This is how migration works:
5419 *
969c7921
TH
5420 * 1) we invoke migration_cpu_stop() on the target CPU using
5421 * stop_one_cpu().
5422 * 2) stopper starts to run (implicitly forcing the migrated thread
5423 * off the CPU)
5424 * 3) it checks whether the migrated task is still in the wrong runqueue.
5425 * 4) if it's in the wrong runqueue then the migration thread removes
1da177e4 5426 * it and puts it into the right queue.
969c7921
TH
5427 * 5) stopper completes and stop_one_cpu() returns and the migration
5428 * is done.
1da177e4
LT
5429 */
5430
5431/*
5432 * Change a given task's CPU affinity. Migrate the thread to a
5433 * proper CPU and schedule it away if the CPU it's executing on
5434 * is removed from the allowed bitmask.
5435 *
5436 * NOTE: the caller must have a valid reference to the task, the
41a2d6cf 5437 * task must not exit() & deallocate itself prematurely. The
1da177e4
LT
5438 * call is not atomic; no spinlocks may be held.
5439 */
96f874e2 5440int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1da177e4
LT
5441{
5442 unsigned long flags;
70b97a7f 5443 struct rq *rq;
969c7921 5444 unsigned int dest_cpu;
48f24c4d 5445 int ret = 0;
1da177e4 5446
65cc8e48
PZ
5447 /*
5448 * Serialize against TASK_WAKING so that ttwu() and wunt() can
5449 * drop the rq->lock and still rely on ->cpus_allowed.
5450 */
5451again:
5452 while (task_is_waking(p))
5453 cpu_relax();
1da177e4 5454 rq = task_rq_lock(p, &flags);
65cc8e48
PZ
5455 if (task_is_waking(p)) {
5456 task_rq_unlock(rq, &flags);
5457 goto again;
5458 }
e2912009 5459
6ad4c188 5460 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
1da177e4
LT
5461 ret = -EINVAL;
5462 goto out;
5463 }
5464
9985b0ba 5465 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
96f874e2 5466 !cpumask_equal(&p->cpus_allowed, new_mask))) {
9985b0ba
DR
5467 ret = -EINVAL;
5468 goto out;
5469 }
5470
73fe6aae 5471 if (p->sched_class->set_cpus_allowed)
cd8ba7cd 5472 p->sched_class->set_cpus_allowed(p, new_mask);
73fe6aae 5473 else {
96f874e2
RR
5474 cpumask_copy(&p->cpus_allowed, new_mask);
5475 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
73fe6aae
GH
5476 }
5477
1da177e4 5478 /* Can the task run on the task's current CPU? If so, we're done */
96f874e2 5479 if (cpumask_test_cpu(task_cpu(p), new_mask))
1da177e4
LT
5480 goto out;
5481
969c7921
TH
5482 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
5483 if (migrate_task(p, dest_cpu)) {
5484 struct migration_arg arg = { p, dest_cpu };
1da177e4
LT
5485 /* Need help from migration thread: drop lock and wait. */
5486 task_rq_unlock(rq, &flags);
969c7921 5487 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1da177e4
LT
5488 tlb_migrate_finish(p->mm);
5489 return 0;
5490 }
5491out:
5492 task_rq_unlock(rq, &flags);
48f24c4d 5493
1da177e4
LT
5494 return ret;
5495}
cd8ba7cd 5496EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
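A typical in-kernel caller just hands this a single-CPU mask. A minimal sketch of a kthread pinning itself, assuming the usual <linux/kthread.h> helpers; the thread body, its data argument and the chosen CPU are hypothetical, not part of this file:

static int my_pinned_worker(void *data)
{
	int cpu = (long)data;	/* CPU picked by whoever spawned the thread */

	/* restrict this thread to one CPU; cpumask_of() yields a const mask */
	if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
		printk(KERN_WARNING "worker: could not bind to CPU %d\n", cpu);

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);

	return 0;
}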
1da177e4
LT
5497
5498/*
41a2d6cf 5499 * Move (not current) task off this cpu, onto dest cpu. We're doing
1da177e4
LT
5500 * this because either it can't run here any more (set_cpus_allowed()
5501 * away from this CPU, or CPU going down), or because we're
5502 * attempting to rebalance this task on exec (sched_exec).
5503 *
5504 * So we race with normal scheduler movements, but that's OK, as long
5505 * as the task is no longer on this CPU.
efc30814
KK
5506 *
5507 * Returns non-zero if task was successfully migrated.
1da177e4 5508 */
efc30814 5509static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
1da177e4 5510{
70b97a7f 5511 struct rq *rq_dest, *rq_src;
e2912009 5512 int ret = 0;
1da177e4 5513
e761b772 5514 if (unlikely(!cpu_active(dest_cpu)))
efc30814 5515 return ret;
1da177e4
LT
5516
5517 rq_src = cpu_rq(src_cpu);
5518 rq_dest = cpu_rq(dest_cpu);
5519
5520 double_rq_lock(rq_src, rq_dest);
5521 /* Already moved. */
5522 if (task_cpu(p) != src_cpu)
b1e38734 5523 goto done;
1da177e4 5524 /* Affinity changed (again). */
96f874e2 5525 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
b1e38734 5526 goto fail;
1da177e4 5527
e2912009
PZ
5528 /*
5529 * If we're not on a rq, the next wake-up will ensure we're
5530 * placed properly.
5531 */
5532 if (p->se.on_rq) {
2e1cb74a 5533 deactivate_task(rq_src, p, 0);
e2912009 5534 set_task_cpu(p, dest_cpu);
dd41f596 5535 activate_task(rq_dest, p, 0);
15afe09b 5536 check_preempt_curr(rq_dest, p, 0);
1da177e4 5537 }
b1e38734 5538done:
efc30814 5539 ret = 1;
b1e38734 5540fail:
1da177e4 5541 double_rq_unlock(rq_src, rq_dest);
efc30814 5542 return ret;
1da177e4
LT
5543}
5544
5545/*
969c7921
TH
5546 * migration_cpu_stop - this will be executed by a highprio stopper thread
5547 * and performs thread migration by bumping thread off CPU then
5548 * 'pushing' onto another runqueue.
1da177e4 5549 */
969c7921 5550static int migration_cpu_stop(void *data)
1da177e4 5551{
969c7921 5552 struct migration_arg *arg = data;
f7b4cddc 5553
969c7921
TH
5554 /*
5555 * The original target cpu might have gone down and we might
5556 * be on another cpu but it doesn't matter.
5557 */
f7b4cddc 5558 local_irq_disable();
969c7921 5559 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
f7b4cddc 5560 local_irq_enable();
1da177e4 5561 return 0;
f7b4cddc
ON
5562}
5563
1da177e4 5564#ifdef CONFIG_HOTPLUG_CPU
054b9108 5565/*
3a4fa0a2 5566 * Figure out where task on dead CPU should go, use force if necessary.
054b9108 5567 */
6a1bdc1b 5568void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
1da177e4 5569{
1445c08d
ON
5570 struct rq *rq = cpu_rq(dead_cpu);
5571 int needs_cpu, uninitialized_var(dest_cpu);
5572 unsigned long flags;
e76bd8d9 5573
1445c08d 5574 local_irq_save(flags);
e76bd8d9 5575
1445c08d
ON
5576 raw_spin_lock(&rq->lock);
5577 needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
5578 if (needs_cpu)
5579 dest_cpu = select_fallback_rq(dead_cpu, p);
5580 raw_spin_unlock(&rq->lock);
c1804d54
ON
5581 /*
5582 * It can only fail if we race with set_cpus_allowed(),
5583 * in which case the racer should migrate the task anyway.
5584 */
1445c08d 5585 if (needs_cpu)
c1804d54 5586 __migrate_task(p, dead_cpu, dest_cpu);
1445c08d 5587 local_irq_restore(flags);
1da177e4
LT
5588}
5589
5590/*
5591 * While a dead CPU has no uninterruptible tasks queued at this point,
5592 * it might still have a nonzero ->nr_uninterruptible counter, because
5593 * for performance reasons the counter is not strictly tracking tasks to
5594 * their home CPUs. So we just add the counter to another CPU's counter,
5595 * to keep the global sum constant after CPU-down:
5596 */
70b97a7f 5597static void migrate_nr_uninterruptible(struct rq *rq_src)
1da177e4 5598{
6ad4c188 5599 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
1da177e4
LT
5600 unsigned long flags;
5601
5602 local_irq_save(flags);
5603 double_rq_lock(rq_src, rq_dest);
5604 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5605 rq_src->nr_uninterruptible = 0;
5606 double_rq_unlock(rq_src, rq_dest);
5607 local_irq_restore(flags);
5608}
5609
5610/* Run through task list and migrate tasks from the dead cpu. */
5611static void migrate_live_tasks(int src_cpu)
5612{
48f24c4d 5613 struct task_struct *p, *t;
1da177e4 5614
f7b4cddc 5615 read_lock(&tasklist_lock);
1da177e4 5616
48f24c4d
IM
5617 do_each_thread(t, p) {
5618 if (p == current)
1da177e4
LT
5619 continue;
5620
48f24c4d
IM
5621 if (task_cpu(p) == src_cpu)
5622 move_task_off_dead_cpu(src_cpu, p);
5623 } while_each_thread(t, p);
1da177e4 5624
f7b4cddc 5625 read_unlock(&tasklist_lock);
1da177e4
LT
5626}
5627
dd41f596
IM
5628/*
5629 * Schedules idle task to be the next runnable task on current CPU.
94bc9a7b
DA
5630 * It does so by boosting its priority to the highest possible.
5631 * Used by CPU offline code.
1da177e4
LT
5632 */
5633void sched_idle_next(void)
5634{
48f24c4d 5635 int this_cpu = smp_processor_id();
70b97a7f 5636 struct rq *rq = cpu_rq(this_cpu);
1da177e4
LT
5637 struct task_struct *p = rq->idle;
5638 unsigned long flags;
5639
5640 /* cpu has to be offline */
48f24c4d 5641 BUG_ON(cpu_online(this_cpu));
1da177e4 5642
48f24c4d
IM
5643 /*
5644 * Strictly not necessary since the rest of the CPUs are stopped by now
5645 * and interrupts disabled on the current cpu.
1da177e4 5646 */
05fa785c 5647 raw_spin_lock_irqsave(&rq->lock, flags);
1da177e4 5648
dd41f596 5649 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
48f24c4d 5650
94bc9a7b 5651 activate_task(rq, p, 0);
1da177e4 5652
05fa785c 5653 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4
LT
5654}
5655
48f24c4d
IM
5656/*
5657 * Ensures that the idle task is using init_mm right before its cpu goes
1da177e4
LT
5658 * offline.
5659 */
5660void idle_task_exit(void)
5661{
5662 struct mm_struct *mm = current->active_mm;
5663
5664 BUG_ON(cpu_online(smp_processor_id()));
5665
5666 if (mm != &init_mm)
5667 switch_mm(mm, &init_mm, current);
5668 mmdrop(mm);
5669}
5670
054b9108 5671/* called under rq->lock with disabled interrupts */
36c8b586 5672static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
1da177e4 5673{
70b97a7f 5674 struct rq *rq = cpu_rq(dead_cpu);
1da177e4
LT
5675
5676 /* Must be exiting, otherwise would be on tasklist. */
270f722d 5677 BUG_ON(!p->exit_state);
1da177e4
LT
5678
5679 /* Cannot have done final schedule yet: would have vanished. */
c394cc9f 5680 BUG_ON(p->state == TASK_DEAD);
1da177e4 5681
48f24c4d 5682 get_task_struct(p);
1da177e4
LT
5683
5684 /*
5685 * Drop lock around migration; if someone else moves it,
41a2d6cf 5686 * that's OK. No task can be added to this CPU, so iteration is
1da177e4
LT
5687 * fine.
5688 */
05fa785c 5689 raw_spin_unlock_irq(&rq->lock);
48f24c4d 5690 move_task_off_dead_cpu(dead_cpu, p);
05fa785c 5691 raw_spin_lock_irq(&rq->lock);
1da177e4 5692
48f24c4d 5693 put_task_struct(p);
1da177e4
LT
5694}
5695
5696/* release_task() removes task from tasklist, so we won't find dead tasks. */
5697static void migrate_dead_tasks(unsigned int dead_cpu)
5698{
70b97a7f 5699 struct rq *rq = cpu_rq(dead_cpu);
dd41f596 5700 struct task_struct *next;
48f24c4d 5701
dd41f596
IM
5702 for ( ; ; ) {
5703 if (!rq->nr_running)
5704 break;
b67802ea 5705 next = pick_next_task(rq);
dd41f596
IM
5706 if (!next)
5707 break;
79c53799 5708 next->sched_class->put_prev_task(rq, next);
dd41f596 5709 migrate_dead(dead_cpu, next);
e692ab53 5710
1da177e4
LT
5711 }
5712}
dce48a84
TG
5713
5714/*
5715 * remove the tasks which were accounted by rq from calc_load_tasks.
5716 */
5717static void calc_global_load_remove(struct rq *rq)
5718{
5719 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
a468d389 5720 rq->calc_load_active = 0;
dce48a84 5721}
1da177e4
LT
5722#endif /* CONFIG_HOTPLUG_CPU */
5723
e692ab53
NP
5724#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5725
5726static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
5727 {
5728 .procname = "sched_domain",
c57baf1e 5729 .mode = 0555,
e0361851 5730 },
56992309 5731 {}
e692ab53
NP
5732};
5733
5734static struct ctl_table sd_ctl_root[] = {
e0361851
AD
5735 {
5736 .procname = "kernel",
c57baf1e 5737 .mode = 0555,
e0361851
AD
5738 .child = sd_ctl_dir,
5739 },
56992309 5740 {}
e692ab53
NP
5741};
5742
5743static struct ctl_table *sd_alloc_ctl_entry(int n)
5744{
5745 struct ctl_table *entry =
5cf9f062 5746 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 5747
e692ab53
NP
5748 return entry;
5749}
5750
6382bc90
MM
5751static void sd_free_ctl_entry(struct ctl_table **tablep)
5752{
cd790076 5753 struct ctl_table *entry;
6382bc90 5754
cd790076
MM
5755 /*
5756 * In the intermediate directories, both the child directory and
5757 * procname are dynamically allocated and could fail but the mode
41a2d6cf 5758 * will always be set. In the lowest directory the names are
cd790076
MM
5759 * static strings and all have proc handlers.
5760 */
5761 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
5762 if (entry->child)
5763 sd_free_ctl_entry(&entry->child);
cd790076
MM
5764 if (entry->proc_handler == NULL)
5765 kfree(entry->procname);
5766 }
6382bc90
MM
5767
5768 kfree(*tablep);
5769 *tablep = NULL;
5770}
5771
e692ab53 5772static void
e0361851 5773set_table_entry(struct ctl_table *entry,
e692ab53
NP
5774 const char *procname, void *data, int maxlen,
5775 mode_t mode, proc_handler *proc_handler)
5776{
e692ab53
NP
5777 entry->procname = procname;
5778 entry->data = data;
5779 entry->maxlen = maxlen;
5780 entry->mode = mode;
5781 entry->proc_handler = proc_handler;
5782}
5783
5784static struct ctl_table *
5785sd_alloc_ctl_domain_table(struct sched_domain *sd)
5786{
a5d8c348 5787 struct ctl_table *table = sd_alloc_ctl_entry(13);
e692ab53 5788
ad1cdc1d
MM
5789 if (table == NULL)
5790 return NULL;
5791
e0361851 5792 set_table_entry(&table[0], "min_interval", &sd->min_interval,
e692ab53 5793 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 5794 set_table_entry(&table[1], "max_interval", &sd->max_interval,
e692ab53 5795 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 5796 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
e692ab53 5797 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5798 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
e692ab53 5799 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5800 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
e692ab53 5801 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5802 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
e692ab53 5803 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5804 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
e692ab53 5805 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5806 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
e692ab53 5807 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 5808 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
e692ab53 5809 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 5810 set_table_entry(&table[9], "cache_nice_tries",
e692ab53
NP
5811 &sd->cache_nice_tries,
5812 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 5813 set_table_entry(&table[10], "flags", &sd->flags,
e692ab53 5814 sizeof(int), 0644, proc_dointvec_minmax);
a5d8c348
IM
5815 set_table_entry(&table[11], "name", sd->name,
5816 CORENAME_MAX_SIZE, 0444, proc_dostring);
5817 /* &table[12] is terminator */
e692ab53
NP
5818
5819 return table;
5820}
5821
9a4e7159 5822static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
5823{
5824 struct ctl_table *entry, *table;
5825 struct sched_domain *sd;
5826 int domain_num = 0, i;
5827 char buf[32];
5828
5829 for_each_domain(cpu, sd)
5830 domain_num++;
5831 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
5832 if (table == NULL)
5833 return NULL;
e692ab53
NP
5834
5835 i = 0;
5836 for_each_domain(cpu, sd) {
5837 snprintf(buf, 32, "domain%d", i);
e692ab53 5838 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5839 entry->mode = 0555;
e692ab53
NP
5840 entry->child = sd_alloc_ctl_domain_table(sd);
5841 entry++;
5842 i++;
5843 }
5844 return table;
5845}
5846
5847static struct ctl_table_header *sd_sysctl_header;
6382bc90 5848static void register_sched_domain_sysctl(void)
e692ab53 5849{
6ad4c188 5850 int i, cpu_num = num_possible_cpus();
e692ab53
NP
5851 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5852 char buf[32];
5853
7378547f
MM
5854 WARN_ON(sd_ctl_dir[0].child);
5855 sd_ctl_dir[0].child = entry;
5856
ad1cdc1d
MM
5857 if (entry == NULL)
5858 return;
5859
6ad4c188 5860 for_each_possible_cpu(i) {
e692ab53 5861 snprintf(buf, 32, "cpu%d", i);
e692ab53 5862 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 5863 entry->mode = 0555;
e692ab53 5864 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 5865 entry++;
e692ab53 5866 }
7378547f
MM
5867
5868 WARN_ON(sd_sysctl_header);
e692ab53
NP
5869 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5870}
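With CONFIG_SCHED_DEBUG and CONFIG_SYSCTL both enabled, the net result is a per-CPU, per-domain tree under procfs; illustrative paths:

	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
	/proc/sys/kernel/sched_domain/cpu0/domain0/flags
	/proc/sys/kernel/sched_domain/cpu0/domain1/name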
6382bc90 5871
7378547f 5872/* may be called multiple times per register */
6382bc90
MM
5873static void unregister_sched_domain_sysctl(void)
5874{
7378547f
MM
5875 if (sd_sysctl_header)
5876 unregister_sysctl_table(sd_sysctl_header);
6382bc90 5877 sd_sysctl_header = NULL;
7378547f
MM
5878 if (sd_ctl_dir[0].child)
5879 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 5880}
e692ab53 5881#else
6382bc90
MM
5882static void register_sched_domain_sysctl(void)
5883{
5884}
5885static void unregister_sched_domain_sysctl(void)
e692ab53
NP
5886{
5887}
5888#endif
5889
1f11eb6a
GH
5890static void set_rq_online(struct rq *rq)
5891{
5892 if (!rq->online) {
5893 const struct sched_class *class;
5894
c6c4927b 5895 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5896 rq->online = 1;
5897
5898 for_each_class(class) {
5899 if (class->rq_online)
5900 class->rq_online(rq);
5901 }
5902 }
5903}
5904
5905static void set_rq_offline(struct rq *rq)
5906{
5907 if (rq->online) {
5908 const struct sched_class *class;
5909
5910 for_each_class(class) {
5911 if (class->rq_offline)
5912 class->rq_offline(rq);
5913 }
5914
c6c4927b 5915 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
5916 rq->online = 0;
5917 }
5918}
5919
1da177e4
LT
5920/*
5921 * migration_call - callback that gets triggered when a CPU is added.
5922 * Here we can start up the necessary migration thread for the new CPU.
5923 */
48f24c4d
IM
5924static int __cpuinit
5925migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 5926{
48f24c4d 5927 int cpu = (long)hcpu;
1da177e4 5928 unsigned long flags;
969c7921 5929 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
5930
5931 switch (action) {
5be9361c 5932
1da177e4 5933 case CPU_UP_PREPARE:
8bb78442 5934 case CPU_UP_PREPARE_FROZEN:
a468d389 5935 rq->calc_load_update = calc_load_update;
1da177e4 5936 break;
48f24c4d 5937
1da177e4 5938 case CPU_ONLINE:
8bb78442 5939 case CPU_ONLINE_FROZEN:
1f94ef59 5940 /* Update our root-domain */
05fa785c 5941 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 5942 if (rq->rd) {
c6c4927b 5943 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
5944
5945 set_rq_online(rq);
1f94ef59 5946 }
05fa785c 5947 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 5948 break;
48f24c4d 5949
1da177e4 5950#ifdef CONFIG_HOTPLUG_CPU
1da177e4 5951 case CPU_DEAD:
8bb78442 5952 case CPU_DEAD_FROZEN:
1da177e4 5953 migrate_live_tasks(cpu);
1da177e4 5954 /* Idle task back to normal (off runqueue, low prio) */
05fa785c 5955 raw_spin_lock_irq(&rq->lock);
2e1cb74a 5956 deactivate_task(rq, rq->idle, 0);
dd41f596
IM
5957 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
5958 rq->idle->sched_class = &idle_sched_class;
1da177e4 5959 migrate_dead_tasks(cpu);
05fa785c 5960 raw_spin_unlock_irq(&rq->lock);
1da177e4
LT
5961 migrate_nr_uninterruptible(rq);
5962 BUG_ON(rq->nr_running != 0);
dce48a84 5963 calc_global_load_remove(rq);
1da177e4 5964 break;
57d885fe 5965
08f503b0
GH
5966 case CPU_DYING:
5967 case CPU_DYING_FROZEN:
57d885fe 5968 /* Update our root-domain */
05fa785c 5969 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 5970 if (rq->rd) {
c6c4927b 5971 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 5972 set_rq_offline(rq);
57d885fe 5973 }
05fa785c 5974 raw_spin_unlock_irqrestore(&rq->lock, flags);
57d885fe 5975 break;
1da177e4
LT
5976#endif
5977 }
5978 return NOTIFY_OK;
5979}
5980
f38b0820
PM
5981/*
5982 * Register at high priority so that task migration (migrate_all_tasks)
5983 * happens before everything else. This has to be lower priority than
cdd6c482 5984 * the notifier in the perf_event subsystem, though.
1da177e4 5985 */
26c2143b 5986static struct notifier_block __cpuinitdata migration_notifier = {
1da177e4 5987 .notifier_call = migration_call,
50a323b7 5988 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
5989};
5990
3a101d05
TH
5991static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5992 unsigned long action, void *hcpu)
5993{
5994 switch (action & ~CPU_TASKS_FROZEN) {
5995 case CPU_ONLINE:
5996 case CPU_DOWN_FAILED:
5997 set_cpu_active((long)hcpu, true);
5998 return NOTIFY_OK;
5999 default:
6000 return NOTIFY_DONE;
6001 }
6002}
6003
6004static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6005 unsigned long action, void *hcpu)
6006{
6007 switch (action & ~CPU_TASKS_FROZEN) {
6008 case CPU_DOWN_PREPARE:
6009 set_cpu_active((long)hcpu, false);
6010 return NOTIFY_OK;
6011 default:
6012 return NOTIFY_DONE;
6013 }
6014}
6015
7babe8db 6016static int __init migration_init(void)
1da177e4
LT
6017{
6018 void *cpu = (void *)(long)smp_processor_id();
07dccf33 6019 int err;
48f24c4d 6020
3a101d05 6021 /* Initialize migration for the boot CPU */
07dccf33
AM
6022 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6023 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
6024 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6025 register_cpu_notifier(&migration_notifier);
7babe8db 6026
3a101d05
TH
6027 /* Register cpu active notifiers */
6028 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6029 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6030
a004cd42 6031 return 0;
1da177e4 6032}
7babe8db 6033early_initcall(migration_init);
1da177e4
LT
6034#endif
6035
6036#ifdef CONFIG_SMP
476f3534 6037
3e9830dc 6038#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 6039
f6630114
MT
6040static __read_mostly int sched_domain_debug_enabled;
6041
6042static int __init sched_domain_debug_setup(char *str)
6043{
6044 sched_domain_debug_enabled = 1;
6045
6046 return 0;
6047}
6048early_param("sched_debug", sched_domain_debug_setup);
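In other words, the domain dumps below are off by default and are enabled by booting with "sched_debug" on the kernel command line.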
6049
7c16ec58 6050static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 6051 struct cpumask *groupmask)
1da177e4 6052{
4dcf6aff 6053 struct sched_group *group = sd->groups;
434d53b0 6054 char str[256];
1da177e4 6055
968ea6d8 6056 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
96f874e2 6057 cpumask_clear(groupmask);
4dcf6aff
IM
6058
6059 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6060
6061 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 6062 printk("does not load-balance\n");
4dcf6aff 6063 if (sd->parent)
3df0fc5b
PZ
6064 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6065 " has parent");
4dcf6aff 6066 return -1;
41c7ce9a
NP
6067 }
6068
3df0fc5b 6069 printk(KERN_CONT "span %s level %s\n", str, sd->name);
4dcf6aff 6070
758b2cdc 6071 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
6072 printk(KERN_ERR "ERROR: domain->span does not contain "
6073 "CPU%d\n", cpu);
4dcf6aff 6074 }
758b2cdc 6075 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
6076 printk(KERN_ERR "ERROR: domain->groups does not contain"
6077 " CPU%d\n", cpu);
4dcf6aff 6078 }
1da177e4 6079
4dcf6aff 6080 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 6081 do {
4dcf6aff 6082 if (!group) {
3df0fc5b
PZ
6083 printk("\n");
6084 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
6085 break;
6086 }
6087
18a3885f 6088 if (!group->cpu_power) {
3df0fc5b
PZ
6089 printk(KERN_CONT "\n");
6090 printk(KERN_ERR "ERROR: domain->cpu_power not "
6091 "set\n");
4dcf6aff
IM
6092 break;
6093 }
1da177e4 6094
758b2cdc 6095 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
6096 printk(KERN_CONT "\n");
6097 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
6098 break;
6099 }
1da177e4 6100
758b2cdc 6101 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
6102 printk(KERN_CONT "\n");
6103 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
6104 break;
6105 }
1da177e4 6106
758b2cdc 6107 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 6108
968ea6d8 6109 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
381512cf 6110
3df0fc5b 6111 printk(KERN_CONT " %s", str);
18a3885f 6112 if (group->cpu_power != SCHED_LOAD_SCALE) {
3df0fc5b
PZ
6113 printk(KERN_CONT " (cpu_power = %d)",
6114 group->cpu_power);
381512cf 6115 }
1da177e4 6116
4dcf6aff
IM
6117 group = group->next;
6118 } while (group != sd->groups);
3df0fc5b 6119 printk(KERN_CONT "\n");
1da177e4 6120
758b2cdc 6121 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 6122 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 6123
758b2cdc
RR
6124 if (sd->parent &&
6125 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
6126 printk(KERN_ERR "ERROR: parent span is not a superset "
6127 "of domain->span\n");
4dcf6aff
IM
6128 return 0;
6129}
1da177e4 6130
4dcf6aff
IM
6131static void sched_domain_debug(struct sched_domain *sd, int cpu)
6132{
d5dd3db1 6133 cpumask_var_t groupmask;
4dcf6aff 6134 int level = 0;
1da177e4 6135
f6630114
MT
6136 if (!sched_domain_debug_enabled)
6137 return;
6138
4dcf6aff
IM
6139 if (!sd) {
6140 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6141 return;
6142 }
1da177e4 6143
4dcf6aff
IM
6144 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6145
d5dd3db1 6146 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
7c16ec58
MT
6147 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6148 return;
6149 }
6150
4dcf6aff 6151 for (;;) {
7c16ec58 6152 if (sched_domain_debug_one(sd, cpu, level, groupmask))
4dcf6aff 6153 break;
1da177e4
LT
6154 level++;
6155 sd = sd->parent;
33859f7f 6156 if (!sd)
4dcf6aff
IM
6157 break;
6158 }
d5dd3db1 6159 free_cpumask_var(groupmask);
1da177e4 6160}
6d6bc0ad 6161#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 6162# define sched_domain_debug(sd, cpu) do { } while (0)
6d6bc0ad 6163#endif /* CONFIG_SCHED_DEBUG */
1da177e4 6164
1a20ff27 6165static int sd_degenerate(struct sched_domain *sd)
245af2c7 6166{
758b2cdc 6167 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
6168 return 1;
6169
6170 /* Following flags need at least 2 groups */
6171 if (sd->flags & (SD_LOAD_BALANCE |
6172 SD_BALANCE_NEWIDLE |
6173 SD_BALANCE_FORK |
89c4710e
SS
6174 SD_BALANCE_EXEC |
6175 SD_SHARE_CPUPOWER |
6176 SD_SHARE_PKG_RESOURCES)) {
245af2c7
SS
6177 if (sd->groups != sd->groups->next)
6178 return 0;
6179 }
6180
6181 /* Following flags don't use groups */
c88d5910 6182 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
6183 return 0;
6184
6185 return 1;
6186}
6187
48f24c4d
IM
6188static int
6189sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
6190{
6191 unsigned long cflags = sd->flags, pflags = parent->flags;
6192
6193 if (sd_degenerate(parent))
6194 return 1;
6195
758b2cdc 6196 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
6197 return 0;
6198
245af2c7
SS
6199 /* Flags needing groups don't count if only 1 group in parent */
6200 if (parent->groups == parent->groups->next) {
6201 pflags &= ~(SD_LOAD_BALANCE |
6202 SD_BALANCE_NEWIDLE |
6203 SD_BALANCE_FORK |
89c4710e
SS
6204 SD_BALANCE_EXEC |
6205 SD_SHARE_CPUPOWER |
6206 SD_SHARE_PKG_RESOURCES);
5436499e
KC
6207 if (nr_node_ids == 1)
6208 pflags &= ~SD_SERIALIZE;
245af2c7
SS
6209 }
6210 if (~cflags & pflags)
6211 return 0;
6212
6213 return 1;
6214}
6215
c6c4927b
RR
6216static void free_rootdomain(struct root_domain *rd)
6217{
047106ad
PZ
6218 synchronize_sched();
6219
68e74568
RR
6220 cpupri_cleanup(&rd->cpupri);
6221
c6c4927b
RR
6222 free_cpumask_var(rd->rto_mask);
6223 free_cpumask_var(rd->online);
6224 free_cpumask_var(rd->span);
6225 kfree(rd);
6226}
6227
57d885fe
GH
6228static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6229{
a0490fa3 6230 struct root_domain *old_rd = NULL;
57d885fe 6231 unsigned long flags;
57d885fe 6232
05fa785c 6233 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
6234
6235 if (rq->rd) {
a0490fa3 6236 old_rd = rq->rd;
57d885fe 6237
c6c4927b 6238 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 6239 set_rq_offline(rq);
57d885fe 6240
c6c4927b 6241 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 6242
a0490fa3
IM
6243 /*
6244 * If we dont want to free the old_rt yet then
6245 * set old_rd to NULL to skip the freeing later
6246 * in this function:
6247 */
6248 if (!atomic_dec_and_test(&old_rd->refcount))
6249 old_rd = NULL;
57d885fe
GH
6250 }
6251
6252 atomic_inc(&rd->refcount);
6253 rq->rd = rd;
6254
c6c4927b 6255 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 6256 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 6257 set_rq_online(rq);
57d885fe 6258
05fa785c 6259 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
6260
6261 if (old_rd)
6262 free_rootdomain(old_rd);
57d885fe
GH
6263}
6264
68c38fc3 6265static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
6266{
6267 memset(rd, 0, sizeof(*rd));
6268
68c38fc3 6269 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 6270 goto out;
68c38fc3 6271 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 6272 goto free_span;
68c38fc3 6273 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
c6c4927b 6274 goto free_online;
6e0534f2 6275
68c38fc3 6276 if (cpupri_init(&rd->cpupri) != 0)
68e74568 6277 goto free_rto_mask;
c6c4927b 6278 return 0;
6e0534f2 6279
68e74568
RR
6280free_rto_mask:
6281 free_cpumask_var(rd->rto_mask);
c6c4927b
RR
6282free_online:
6283 free_cpumask_var(rd->online);
6284free_span:
6285 free_cpumask_var(rd->span);
0c910d28 6286out:
c6c4927b 6287 return -ENOMEM;
57d885fe
GH
6288}
6289
6290static void init_defrootdomain(void)
6291{
68c38fc3 6292 init_rootdomain(&def_root_domain);
c6c4927b 6293
57d885fe
GH
6294 atomic_set(&def_root_domain.refcount, 1);
6295}
6296
dc938520 6297static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
6298{
6299 struct root_domain *rd;
6300
6301 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6302 if (!rd)
6303 return NULL;
6304
68c38fc3 6305 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
6306 kfree(rd);
6307 return NULL;
6308 }
57d885fe
GH
6309
6310 return rd;
6311}
6312
1da177e4 6313/*
0eab9146 6314 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
6315 * hold the hotplug lock.
6316 */
0eab9146
IM
6317static void
6318cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 6319{
70b97a7f 6320 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
6321 struct sched_domain *tmp;
6322
669c55e9
PZ
6323 for (tmp = sd; tmp; tmp = tmp->parent)
6324 tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6325
245af2c7 6326 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 6327 for (tmp = sd; tmp; ) {
245af2c7
SS
6328 struct sched_domain *parent = tmp->parent;
6329 if (!parent)
6330 break;
f29c9b1c 6331
1a848870 6332 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 6333 tmp->parent = parent->parent;
1a848870
SS
6334 if (parent->parent)
6335 parent->parent->child = tmp;
f29c9b1c
LZ
6336 } else
6337 tmp = tmp->parent;
245af2c7
SS
6338 }
6339
1a848870 6340 if (sd && sd_degenerate(sd)) {
245af2c7 6341 sd = sd->parent;
1a848870
SS
6342 if (sd)
6343 sd->child = NULL;
6344 }
1da177e4
LT
6345
6346 sched_domain_debug(sd, cpu);
6347
57d885fe 6348 rq_attach_root(rq, rd);
674311d5 6349 rcu_assign_pointer(rq->sd, sd);
1da177e4
LT
6350}
6351
6352/* cpus with isolated domains */
dcc30a35 6353static cpumask_var_t cpu_isolated_map;
1da177e4
LT
6354
6355/* Setup the mask of cpus configured for isolated domains */
6356static int __init isolated_cpu_setup(char *str)
6357{
bdddd296 6358 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 6359 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
6360 return 1;
6361}
6362
8927f494 6363__setup("isolcpus=", isolated_cpu_setup);
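The argument is a cpulist, so booting with e.g. "isolcpus=2,3" or "isolcpus=4-7" keeps those CPUs out of the general balancing domains; they then only run tasks explicitly bound to them with an affinity call.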
1da177e4
LT
6364
6365/*
6711cab4
SS
6366 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6367 * to a function which identifies what group (along with sched group) a CPU
96f874e2
RR
6368 * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
6369 * (due to the fact that we keep track of groups covered with a struct cpumask).
1da177e4
LT
6370 *
6371 * init_sched_build_groups will build a circular linked list of the groups
6372 * covered by the given span, and will set each group's ->cpumask correctly,
6373 * and ->cpu_power to 0.
6374 */
a616058b 6375static void
96f874e2
RR
6376init_sched_build_groups(const struct cpumask *span,
6377 const struct cpumask *cpu_map,
6378 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
7c16ec58 6379 struct sched_group **sg,
96f874e2
RR
6380 struct cpumask *tmpmask),
6381 struct cpumask *covered, struct cpumask *tmpmask)
1da177e4
LT
6382{
6383 struct sched_group *first = NULL, *last = NULL;
1da177e4
LT
6384 int i;
6385
96f874e2 6386 cpumask_clear(covered);
7c16ec58 6387
abcd083a 6388 for_each_cpu(i, span) {
6711cab4 6389 struct sched_group *sg;
7c16ec58 6390 int group = group_fn(i, cpu_map, &sg, tmpmask);
1da177e4
LT
6391 int j;
6392
758b2cdc 6393 if (cpumask_test_cpu(i, covered))
1da177e4
LT
6394 continue;
6395
758b2cdc 6396 cpumask_clear(sched_group_cpus(sg));
18a3885f 6397 sg->cpu_power = 0;
1da177e4 6398
abcd083a 6399 for_each_cpu(j, span) {
7c16ec58 6400 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
1da177e4
LT
6401 continue;
6402
96f874e2 6403 cpumask_set_cpu(j, covered);
758b2cdc 6404 cpumask_set_cpu(j, sched_group_cpus(sg));
1da177e4
LT
6405 }
6406 if (!first)
6407 first = sg;
6408 if (last)
6409 last->next = sg;
6410 last = sg;
6411 }
6412 last->next = first;
6413}
6414
9c1cfda2 6415#define SD_NODES_PER_DOMAIN 16
1da177e4 6416
9c1cfda2 6417#ifdef CONFIG_NUMA
198e2f18 6418
9c1cfda2
JH
6419/**
6420 * find_next_best_node - find the next node to include in a sched_domain
6421 * @node: node whose sched_domain we're building
6422 * @used_nodes: nodes already in the sched_domain
6423 *
41a2d6cf 6424 * Find the next node to include in a given scheduling domain. Simply
9c1cfda2
JH
6425 * finds the closest node not already in the @used_nodes map.
6426 *
6427 * Should use nodemask_t.
6428 */
c5f59f08 6429static int find_next_best_node(int node, nodemask_t *used_nodes)
9c1cfda2
JH
6430{
6431 int i, n, val, min_val, best_node = 0;
6432
6433 min_val = INT_MAX;
6434
076ac2af 6435 for (i = 0; i < nr_node_ids; i++) {
9c1cfda2 6436 /* Start at @node */
076ac2af 6437 n = (node + i) % nr_node_ids;
9c1cfda2
JH
6438
6439 if (!nr_cpus_node(n))
6440 continue;
6441
6442 /* Skip already used nodes */
c5f59f08 6443 if (node_isset(n, *used_nodes))
9c1cfda2
JH
6444 continue;
6445
6446 /* Simple min distance search */
6447 val = node_distance(node, n);
6448
6449 if (val < min_val) {
6450 min_val = val;
6451 best_node = n;
6452 }
6453 }
6454
c5f59f08 6455 node_set(best_node, *used_nodes);
9c1cfda2
JH
6456 return best_node;
6457}
6458
6459/**
6460 * sched_domain_node_span - get a cpumask for a node's sched_domain
6461 * @node: node whose cpumask we're constructing
73486722 6462 * @span: resulting cpumask
9c1cfda2 6463 *
41a2d6cf 6464 * Given a node, construct a good cpumask for its sched_domain to span. It
9c1cfda2
JH
6465 * should be one that prevents unnecessary balancing, but also spreads tasks
6466 * out optimally.
6467 */
96f874e2 6468static void sched_domain_node_span(int node, struct cpumask *span)
9c1cfda2 6469{
c5f59f08 6470 nodemask_t used_nodes;
48f24c4d 6471 int i;
9c1cfda2 6472
6ca09dfc 6473 cpumask_clear(span);
c5f59f08 6474 nodes_clear(used_nodes);
9c1cfda2 6475
6ca09dfc 6476 cpumask_or(span, span, cpumask_of_node(node));
c5f59f08 6477 node_set(node, used_nodes);
9c1cfda2
JH
6478
6479 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
c5f59f08 6480 int next_node = find_next_best_node(node, &used_nodes);
48f24c4d 6481
6ca09dfc 6482 cpumask_or(span, span, cpumask_of_node(next_node));
9c1cfda2 6483 }
9c1cfda2 6484}
6d6bc0ad 6485#endif /* CONFIG_NUMA */
9c1cfda2 6486
5c45bf27 6487int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
48f24c4d 6488
6c99e9ad
RR
6489/*
6490 * The cpus mask in sched_group and sched_domain hangs off the end.
4200efd9
IM
6491 *
6492 * ( See the comments in include/linux/sched.h:struct sched_group
6493 * and struct sched_domain. )
6c99e9ad
RR
6494 */
6495struct static_sched_group {
6496 struct sched_group sg;
6497 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
6498};
6499
6500struct static_sched_domain {
6501 struct sched_domain sd;
6502 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
6503};
6504
49a02c51
AH
6505struct s_data {
6506#ifdef CONFIG_NUMA
6507 int sd_allnodes;
6508 cpumask_var_t domainspan;
6509 cpumask_var_t covered;
6510 cpumask_var_t notcovered;
6511#endif
6512 cpumask_var_t nodemask;
6513 cpumask_var_t this_sibling_map;
6514 cpumask_var_t this_core_map;
6515 cpumask_var_t send_covered;
6516 cpumask_var_t tmpmask;
6517 struct sched_group **sched_group_nodes;
6518 struct root_domain *rd;
6519};
6520
2109b99e
AH
6521enum s_alloc {
6522 sa_sched_groups = 0,
6523 sa_rootdomain,
6524 sa_tmpmask,
6525 sa_send_covered,
6526 sa_this_core_map,
6527 sa_this_sibling_map,
6528 sa_nodemask,
6529 sa_sched_group_nodes,
6530#ifdef CONFIG_NUMA
6531 sa_notcovered,
6532 sa_covered,
6533 sa_domainspan,
6534#endif
6535 sa_none,
6536};
6537
9c1cfda2 6538/*
48f24c4d 6539 * SMT sched-domains:
9c1cfda2 6540 */
1da177e4 6541#ifdef CONFIG_SCHED_SMT
6c99e9ad 6542static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
1871e52c 6543static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
48f24c4d 6544
41a2d6cf 6545static int
96f874e2
RR
6546cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
6547 struct sched_group **sg, struct cpumask *unused)
1da177e4 6548{
6711cab4 6549 if (sg)
1871e52c 6550 *sg = &per_cpu(sched_groups, cpu).sg;
1da177e4
LT
6551 return cpu;
6552}
6d6bc0ad 6553#endif /* CONFIG_SCHED_SMT */
1da177e4 6554
48f24c4d
IM
6555/*
6556 * multi-core sched-domains:
6557 */
1e9f28fa 6558#ifdef CONFIG_SCHED_MC
6c99e9ad
RR
6559static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
6560static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
6d6bc0ad 6561#endif /* CONFIG_SCHED_MC */
1e9f28fa
SS
6562
6563#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
41a2d6cf 6564static int
96f874e2
RR
6565cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6566 struct sched_group **sg, struct cpumask *mask)
1e9f28fa 6567{
6711cab4 6568 int group;
7c16ec58 6569
c69fc56d 6570 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
96f874e2 6571 group = cpumask_first(mask);
6711cab4 6572 if (sg)
6c99e9ad 6573 *sg = &per_cpu(sched_group_core, group).sg;
6711cab4 6574 return group;
1e9f28fa
SS
6575}
6576#elif defined(CONFIG_SCHED_MC)
41a2d6cf 6577static int
96f874e2
RR
6578cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
6579 struct sched_group **sg, struct cpumask *unused)
1e9f28fa 6580{
6711cab4 6581 if (sg)
6c99e9ad 6582 *sg = &per_cpu(sched_group_core, cpu).sg;
1e9f28fa
SS
6583 return cpu;
6584}
6585#endif
6586
6c99e9ad
RR
6587static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
6588static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
48f24c4d 6589
41a2d6cf 6590static int
96f874e2
RR
6591cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
6592 struct sched_group **sg, struct cpumask *mask)
1da177e4 6593{
6711cab4 6594 int group;
48f24c4d 6595#ifdef CONFIG_SCHED_MC
6ca09dfc 6596 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
96f874e2 6597 group = cpumask_first(mask);
1e9f28fa 6598#elif defined(CONFIG_SCHED_SMT)
c69fc56d 6599 cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
96f874e2 6600 group = cpumask_first(mask);
1da177e4 6601#else
6711cab4 6602 group = cpu;
1da177e4 6603#endif
6711cab4 6604 if (sg)
6c99e9ad 6605 *sg = &per_cpu(sched_group_phys, group).sg;
6711cab4 6606 return group;
1da177e4
LT
6607}
6608
6609#ifdef CONFIG_NUMA
1da177e4 6610/*
9c1cfda2
JH
6611 * The init_sched_build_groups can't handle what we want to do with node
6612 * groups, so roll our own. Now each node has its own list of groups which
6613 * gets dynamically allocated.
1da177e4 6614 */
62ea9ceb 6615static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
434d53b0 6616static struct sched_group ***sched_group_nodes_bycpu;
1da177e4 6617
62ea9ceb 6618static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
6c99e9ad 6619static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
9c1cfda2 6620
96f874e2
RR
6621static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
6622 struct sched_group **sg,
6623 struct cpumask *nodemask)
9c1cfda2 6624{
6711cab4
SS
6625 int group;
6626
6ca09dfc 6627 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
96f874e2 6628 group = cpumask_first(nodemask);
6711cab4
SS
6629
6630 if (sg)
6c99e9ad 6631 *sg = &per_cpu(sched_group_allnodes, group).sg;
6711cab4 6632 return group;
1da177e4 6633}
6711cab4 6634
08069033
SS
6635static void init_numa_sched_groups_power(struct sched_group *group_head)
6636{
6637 struct sched_group *sg = group_head;
6638 int j;
6639
6640 if (!sg)
6641 return;
3a5c359a 6642 do {
758b2cdc 6643 for_each_cpu(j, sched_group_cpus(sg)) {
3a5c359a 6644 struct sched_domain *sd;
08069033 6645
6c99e9ad 6646 sd = &per_cpu(phys_domains, j).sd;
13318a71 6647 if (j != group_first_cpu(sd->groups)) {
3a5c359a
AK
6648 /*
6649 * Only add "power" once for each
6650 * physical package.
6651 */
6652 continue;
6653 }
08069033 6654
18a3885f 6655 sg->cpu_power += sd->groups->cpu_power;
3a5c359a
AK
6656 }
6657 sg = sg->next;
6658 } while (sg != group_head);
08069033 6659}
0601a88d
AH
6660
6661static int build_numa_sched_groups(struct s_data *d,
6662 const struct cpumask *cpu_map, int num)
6663{
6664 struct sched_domain *sd;
6665 struct sched_group *sg, *prev;
6666 int n, j;
6667
6668 cpumask_clear(d->covered);
6669 cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
6670 if (cpumask_empty(d->nodemask)) {
6671 d->sched_group_nodes[num] = NULL;
6672 goto out;
6673 }
6674
6675 sched_domain_node_span(num, d->domainspan);
6676 cpumask_and(d->domainspan, d->domainspan, cpu_map);
6677
6678 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
6679 GFP_KERNEL, num);
6680 if (!sg) {
3df0fc5b
PZ
6681 printk(KERN_WARNING "Can not alloc domain group for node %d\n",
6682 num);
0601a88d
AH
6683 return -ENOMEM;
6684 }
6685 d->sched_group_nodes[num] = sg;
6686
6687 for_each_cpu(j, d->nodemask) {
6688 sd = &per_cpu(node_domains, j).sd;
6689 sd->groups = sg;
6690 }
6691
18a3885f 6692 sg->cpu_power = 0;
0601a88d
AH
6693 cpumask_copy(sched_group_cpus(sg), d->nodemask);
6694 sg->next = sg;
6695 cpumask_or(d->covered, d->covered, d->nodemask);
6696
6697 prev = sg;
6698 for (j = 0; j < nr_node_ids; j++) {
6699 n = (num + j) % nr_node_ids;
6700 cpumask_complement(d->notcovered, d->covered);
6701 cpumask_and(d->tmpmask, d->notcovered, cpu_map);
6702 cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
6703 if (cpumask_empty(d->tmpmask))
6704 break;
6705 cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
6706 if (cpumask_empty(d->tmpmask))
6707 continue;
6708 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
6709 GFP_KERNEL, num);
6710 if (!sg) {
3df0fc5b
PZ
6711 printk(KERN_WARNING
6712 "Can not alloc domain group for node %d\n", j);
0601a88d
AH
6713 return -ENOMEM;
6714 }
18a3885f 6715 sg->cpu_power = 0;
0601a88d
AH
6716 cpumask_copy(sched_group_cpus(sg), d->tmpmask);
6717 sg->next = prev->next;
6718 cpumask_or(d->covered, d->covered, d->tmpmask);
6719 prev->next = sg;
6720 prev = sg;
6721 }
6722out:
6723 return 0;
6724}
6d6bc0ad 6725#endif /* CONFIG_NUMA */
1da177e4 6726
a616058b 6727#ifdef CONFIG_NUMA
51888ca2 6728/* Free memory allocated for various sched_group structures */
96f874e2
RR
6729static void free_sched_groups(const struct cpumask *cpu_map,
6730 struct cpumask *nodemask)
51888ca2 6731{
a616058b 6732 int cpu, i;
51888ca2 6733
abcd083a 6734 for_each_cpu(cpu, cpu_map) {
51888ca2
SV
6735 struct sched_group **sched_group_nodes
6736 = sched_group_nodes_bycpu[cpu];
6737
51888ca2
SV
6738 if (!sched_group_nodes)
6739 continue;
6740
076ac2af 6741 for (i = 0; i < nr_node_ids; i++) {
51888ca2
SV
6742 struct sched_group *oldsg, *sg = sched_group_nodes[i];
6743
6ca09dfc 6744 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
96f874e2 6745 if (cpumask_empty(nodemask))
51888ca2
SV
6746 continue;
6747
6748 if (sg == NULL)
6749 continue;
6750 sg = sg->next;
6751next_sg:
6752 oldsg = sg;
6753 sg = sg->next;
6754 kfree(oldsg);
6755 if (oldsg != sched_group_nodes[i])
6756 goto next_sg;
6757 }
6758 kfree(sched_group_nodes);
6759 sched_group_nodes_bycpu[cpu] = NULL;
6760 }
51888ca2 6761}
6d6bc0ad 6762#else /* !CONFIG_NUMA */
96f874e2
RR
6763static void free_sched_groups(const struct cpumask *cpu_map,
6764 struct cpumask *nodemask)
a616058b
SS
6765{
6766}
6d6bc0ad 6767#endif /* CONFIG_NUMA */
51888ca2 6768
89c4710e
SS
6769/*
6770 * Initialize sched groups cpu_power.
6771 *
6772 * cpu_power indicates the capacity of sched group, which is used while
6773 * distributing the load between different sched groups in a sched domain.
6774 * Typically cpu_power for all the groups in a sched domain will be the same unless
6775 * there are asymmetries in the topology. If there are asymmetries, group
6776 * having more cpu_power will pick up more load compared to the group having
6777 * less cpu_power.
89c4710e
SS
6778 */
6779static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6780{
6781 struct sched_domain *child;
6782 struct sched_group *group;
f93e65c1
PZ
6783 long power;
6784 int weight;
89c4710e
SS
6785
6786 WARN_ON(!sd || !sd->groups);
6787
13318a71 6788 if (cpu != group_first_cpu(sd->groups))
89c4710e
SS
6789 return;
6790
6791 child = sd->child;
6792
18a3885f 6793 sd->groups->cpu_power = 0;
5517d86b 6794
f93e65c1
PZ
6795 if (!child) {
6796 power = SCHED_LOAD_SCALE;
6797 weight = cpumask_weight(sched_domain_span(sd));
6798 /*
6799 * SMT siblings share the power of a single core.
a52bfd73
PZ
6800 * Usually multiple threads get a better yield out of
6801 * that one core than a single thread would have;
6802 * reflect that in sd->smt_gain.
f93e65c1 6803 */
a52bfd73
PZ
6804 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
6805 power *= sd->smt_gain;
f93e65c1 6806 power /= weight;
a52bfd73
PZ
6807 power >>= SCHED_LOAD_SHIFT;
6808 }
18a3885f 6809 sd->groups->cpu_power += power;
89c4710e
SS
6810 return;
6811 }
6812
89c4710e 6813 /*
f93e65c1 6814 * Add cpu_power of each child group to this groups cpu_power.
89c4710e
SS
6815 */
6816 group = child->groups;
6817 do {
18a3885f 6818 sd->groups->cpu_power += group->cpu_power;
89c4710e
SS
6819 group = group->next;
6820 } while (group != child->groups);
6821}
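Rough worked example for the SMT leaf case, treating the default smt_gain of 1178 as an assumption: with two siblings, power = 1024 * 1178 / 2 >> SCHED_LOAD_SHIFT = 589, so the sibling group advertises a bit more than half of SCHED_LOAD_SCALE, reflecting the partial benefit of the second hardware thread.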
6822
7c16ec58
MT
6823/*
6824 * Initializers for schedule domains
6825 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6826 */
6827
a5d8c348
IM
6828#ifdef CONFIG_SCHED_DEBUG
6829# define SD_INIT_NAME(sd, type) sd->name = #type
6830#else
6831# define SD_INIT_NAME(sd, type) do { } while (0)
6832#endif
6833
7c16ec58 6834#define SD_INIT(sd, type) sd_init_##type(sd)
a5d8c348 6835
7c16ec58
MT
6836#define SD_INIT_FUNC(type) \
6837static noinline void sd_init_##type(struct sched_domain *sd) \
6838{ \
6839 memset(sd, 0, sizeof(*sd)); \
6840 *sd = SD_##type##_INIT; \
1d3504fc 6841 sd->level = SD_LV_##type; \
a5d8c348 6842 SD_INIT_NAME(sd, type); \
7c16ec58
MT
6843}
6844
6845SD_INIT_FUNC(CPU)
6846#ifdef CONFIG_NUMA
6847 SD_INIT_FUNC(ALLNODES)
6848 SD_INIT_FUNC(NODE)
6849#endif
6850#ifdef CONFIG_SCHED_SMT
6851 SD_INIT_FUNC(SIBLING)
6852#endif
6853#ifdef CONFIG_SCHED_MC
6854 SD_INIT_FUNC(MC)
6855#endif
6856
1d3504fc
HS
6857static int default_relax_domain_level = -1;
6858
6859static int __init setup_relax_domain_level(char *str)
6860{
30e0e178
LZ
6861 unsigned long val;
6862
6863 val = simple_strtoul(str, NULL, 0);
6864 if (val < SD_LV_MAX)
6865 default_relax_domain_level = val;
6866
1d3504fc
HS
6867 return 1;
6868}
6869__setup("relax_domain_level=", setup_relax_domain_level);
6870
6871static void set_domain_attribute(struct sched_domain *sd,
6872 struct sched_domain_attr *attr)
6873{
6874 int request;
6875
6876 if (!attr || attr->relax_domain_level < 0) {
6877 if (default_relax_domain_level < 0)
6878 return;
6879 else
6880 request = default_relax_domain_level;
6881 } else
6882 request = attr->relax_domain_level;
6883 if (request < sd->level) {
6884 /* turn off idle balance on this domain */
c88d5910 6885 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6886 } else {
6887 /* turn on idle balance on this domain */
c88d5910 6888 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
6889 }
6890}
6891
2109b99e
AH
6892static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6893 const struct cpumask *cpu_map)
6894{
6895 switch (what) {
6896 case sa_sched_groups:
6897 free_sched_groups(cpu_map, d->tmpmask); /* fall through */
6898 d->sched_group_nodes = NULL;
6899 case sa_rootdomain:
6900 free_rootdomain(d->rd); /* fall through */
6901 case sa_tmpmask:
6902 free_cpumask_var(d->tmpmask); /* fall through */
6903 case sa_send_covered:
6904 free_cpumask_var(d->send_covered); /* fall through */
6905 case sa_this_core_map:
6906 free_cpumask_var(d->this_core_map); /* fall through */
6907 case sa_this_sibling_map:
6908 free_cpumask_var(d->this_sibling_map); /* fall through */
6909 case sa_nodemask:
6910 free_cpumask_var(d->nodemask); /* fall through */
6911 case sa_sched_group_nodes:
d1b55138 6912#ifdef CONFIG_NUMA
2109b99e
AH
6913 kfree(d->sched_group_nodes); /* fall through */
6914 case sa_notcovered:
6915 free_cpumask_var(d->notcovered); /* fall through */
6916 case sa_covered:
6917 free_cpumask_var(d->covered); /* fall through */
6918 case sa_domainspan:
6919 free_cpumask_var(d->domainspan); /* fall through */
3404c8d9 6920#endif
2109b99e
AH
6921 case sa_none:
6922 break;
6923 }
6924}
3404c8d9 6925
2109b99e
AH
6926static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6927 const struct cpumask *cpu_map)
6928{
3404c8d9 6929#ifdef CONFIG_NUMA
2109b99e
AH
6930 if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
6931 return sa_none;
6932 if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
6933 return sa_domainspan;
6934 if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
6935 return sa_covered;
6936 /* Allocate the per-node list of sched groups */
6937 d->sched_group_nodes = kcalloc(nr_node_ids,
6938 sizeof(struct sched_group *), GFP_KERNEL);
6939 if (!d->sched_group_nodes) {
3df0fc5b 6940 printk(KERN_WARNING "Can not alloc sched group node list\n");
2109b99e 6941 return sa_notcovered;
d1b55138 6942 }
2109b99e 6943 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
d1b55138 6944#endif
2109b99e
AH
6945 if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
6946 return sa_sched_group_nodes;
6947 if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
6948 return sa_nodemask;
6949 if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
6950 return sa_this_sibling_map;
6951 if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
6952 return sa_this_core_map;
6953 if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
6954 return sa_send_covered;
6955 d->rd = alloc_rootdomain();
6956 if (!d->rd) {
3df0fc5b 6957 printk(KERN_WARNING "Cannot alloc root domain\n");
2109b99e 6958 return sa_tmpmask;
57d885fe 6959 }
2109b99e
AH
6960 return sa_rootdomain;
6961}
57d885fe 6962
7f4588f3
AH
6963static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
6964 const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
6965{
6966 struct sched_domain *sd = NULL;
7c16ec58 6967#ifdef CONFIG_NUMA
7f4588f3 6968 struct sched_domain *parent;
1da177e4 6969
7f4588f3
AH
6970 d->sd_allnodes = 0;
6971 if (cpumask_weight(cpu_map) >
6972 SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
6973 sd = &per_cpu(allnodes_domains, i).sd;
6974 SD_INIT(sd, ALLNODES);
1d3504fc 6975 set_domain_attribute(sd, attr);
7f4588f3
AH
6976 cpumask_copy(sched_domain_span(sd), cpu_map);
6977 cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
6978 d->sd_allnodes = 1;
6979 }
6980 parent = sd;
6981
6982 sd = &per_cpu(node_domains, i).sd;
6983 SD_INIT(sd, NODE);
6984 set_domain_attribute(sd, attr);
6985 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
6986 sd->parent = parent;
6987 if (parent)
6988 parent->child = sd;
6989 cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
1da177e4 6990#endif
7f4588f3
AH
6991 return sd;
6992}
1da177e4 6993
87cce662
AH
6994static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
6995 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
6996 struct sched_domain *parent, int i)
6997{
6998 struct sched_domain *sd;
6999 sd = &per_cpu(phys_domains, i).sd;
7000 SD_INIT(sd, CPU);
7001 set_domain_attribute(sd, attr);
7002 cpumask_copy(sched_domain_span(sd), d->nodemask);
7003 sd->parent = parent;
7004 if (parent)
7005 parent->child = sd;
7006 cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
7007 return sd;
7008}
1da177e4 7009
410c4081
AH
7010static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
7011 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7012 struct sched_domain *parent, int i)
7013{
7014 struct sched_domain *sd = parent;
1e9f28fa 7015#ifdef CONFIG_SCHED_MC
410c4081
AH
7016 sd = &per_cpu(core_domains, i).sd;
7017 SD_INIT(sd, MC);
7018 set_domain_attribute(sd, attr);
7019 cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
7020 sd->parent = parent;
7021 parent->child = sd;
7022 cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
1e9f28fa 7023#endif
410c4081
AH
7024 return sd;
7025}
1e9f28fa 7026
d8173535
AH
7027static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
7028 const struct cpumask *cpu_map, struct sched_domain_attr *attr,
7029 struct sched_domain *parent, int i)
7030{
7031 struct sched_domain *sd = parent;
1da177e4 7032#ifdef CONFIG_SCHED_SMT
d8173535
AH
7033 sd = &per_cpu(cpu_domains, i).sd;
7034 SD_INIT(sd, SIBLING);
7035 set_domain_attribute(sd, attr);
7036 cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
7037 sd->parent = parent;
7038 parent->child = sd;
7039 cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
1da177e4 7040#endif
d8173535
AH
7041 return sd;
7042}
1da177e4 7043
0e8e85c9
AH
7044static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
7045 const struct cpumask *cpu_map, int cpu)
7046{
7047 switch (l) {
1da177e4 7048#ifdef CONFIG_SCHED_SMT
0e8e85c9
AH
7049 case SD_LV_SIBLING: /* set up CPU (sibling) groups */
7050 cpumask_and(d->this_sibling_map, cpu_map,
7051 topology_thread_cpumask(cpu));
7052 if (cpu == cpumask_first(d->this_sibling_map))
7053 init_sched_build_groups(d->this_sibling_map, cpu_map,
7054 &cpu_to_cpu_group,
7055 d->send_covered, d->tmpmask);
7056 break;
1da177e4 7057#endif
1e9f28fa 7058#ifdef CONFIG_SCHED_MC
a2af04cd
AH
7059 case SD_LV_MC: /* set up multi-core groups */
7060 cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
7061 if (cpu == cpumask_first(d->this_core_map))
7062 init_sched_build_groups(d->this_core_map, cpu_map,
7063 &cpu_to_core_group,
7064 d->send_covered, d->tmpmask);
7065 break;
1e9f28fa 7066#endif
86548096
AH
7067 case SD_LV_CPU: /* set up physical groups */
7068 cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
7069 if (!cpumask_empty(d->nodemask))
7070 init_sched_build_groups(d->nodemask, cpu_map,
7071 &cpu_to_phys_group,
7072 d->send_covered, d->tmpmask);
7073 break;
1da177e4 7074#ifdef CONFIG_NUMA
de616e36
AH
7075 case SD_LV_ALLNODES:
7076 init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
7077 d->send_covered, d->tmpmask);
7078 break;
7079#endif
0e8e85c9
AH
7080 default:
7081 break;
7c16ec58 7082 }
0e8e85c9 7083}
9c1cfda2 7084
2109b99e
AH
7085/*
7086 * Build sched domains for a given set of cpus and attach the sched domains
7087 * to the individual cpus
7088 */
7089static int __build_sched_domains(const struct cpumask *cpu_map,
7090 struct sched_domain_attr *attr)
7091{
7092 enum s_alloc alloc_state = sa_none;
7093 struct s_data d;
294b0c96 7094 struct sched_domain *sd;
2109b99e 7095 int i;
7c16ec58 7096#ifdef CONFIG_NUMA
2109b99e 7097 d.sd_allnodes = 0;
7c16ec58 7098#endif
9c1cfda2 7099
2109b99e
AH
7100 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7101 if (alloc_state != sa_rootdomain)
7102 goto error;
7103 alloc_state = sa_sched_groups;
9c1cfda2 7104
1da177e4 7105 /*
1a20ff27 7106 * Set up domains for cpus specified by the cpu_map.
1da177e4 7107 */
abcd083a 7108 for_each_cpu(i, cpu_map) {
49a02c51
AH
7109 cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
7110 cpu_map);
9761eea8 7111
7f4588f3 7112 sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
87cce662 7113 sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
410c4081 7114 sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
d8173535 7115 sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
1da177e4 7116 }
9c1cfda2 7117
abcd083a 7118 for_each_cpu(i, cpu_map) {
0e8e85c9 7119 build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
a2af04cd 7120 build_sched_groups(&d, SD_LV_MC, cpu_map, i);
1da177e4 7121 }
9c1cfda2 7122
1da177e4 7123 /* Set up physical groups */
86548096
AH
7124 for (i = 0; i < nr_node_ids; i++)
7125 build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
9c1cfda2 7126
1da177e4
LT
7127#ifdef CONFIG_NUMA
7128 /* Set up node groups */
de616e36
AH
7129 if (d.sd_allnodes)
7130 build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
9c1cfda2 7131
0601a88d
AH
7132 for (i = 0; i < nr_node_ids; i++)
7133 if (build_numa_sched_groups(&d, cpu_map, i))
51888ca2 7134 goto error;
1da177e4
LT
7135#endif
7136
7137 /* Calculate CPU power for physical packages and nodes */
5c45bf27 7138#ifdef CONFIG_SCHED_SMT
abcd083a 7139 for_each_cpu(i, cpu_map) {
294b0c96 7140 sd = &per_cpu(cpu_domains, i).sd;
89c4710e 7141 init_sched_groups_power(i, sd);
5c45bf27 7142 }
1da177e4 7143#endif
1e9f28fa 7144#ifdef CONFIG_SCHED_MC
abcd083a 7145 for_each_cpu(i, cpu_map) {
294b0c96 7146 sd = &per_cpu(core_domains, i).sd;
89c4710e 7147 init_sched_groups_power(i, sd);
5c45bf27
SS
7148 }
7149#endif
1e9f28fa 7150
abcd083a 7151 for_each_cpu(i, cpu_map) {
294b0c96 7152 sd = &per_cpu(phys_domains, i).sd;
89c4710e 7153 init_sched_groups_power(i, sd);
1da177e4
LT
7154 }
7155
9c1cfda2 7156#ifdef CONFIG_NUMA
076ac2af 7157 for (i = 0; i < nr_node_ids; i++)
49a02c51 7158 init_numa_sched_groups_power(d.sched_group_nodes[i]);
9c1cfda2 7159
49a02c51 7160 if (d.sd_allnodes) {
6711cab4 7161 struct sched_group *sg;
f712c0c7 7162
96f874e2 7163 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
49a02c51 7164 d.tmpmask);
f712c0c7
SS
7165 init_numa_sched_groups_power(sg);
7166 }
9c1cfda2
JH
7167#endif
7168
1da177e4 7169 /* Attach the domains */
abcd083a 7170 for_each_cpu(i, cpu_map) {
1da177e4 7171#ifdef CONFIG_SCHED_SMT
6c99e9ad 7172 sd = &per_cpu(cpu_domains, i).sd;
1e9f28fa 7173#elif defined(CONFIG_SCHED_MC)
6c99e9ad 7174 sd = &per_cpu(core_domains, i).sd;
1da177e4 7175#else
6c99e9ad 7176 sd = &per_cpu(phys_domains, i).sd;
1da177e4 7177#endif
49a02c51 7178 cpu_attach_domain(sd, d.rd, i);
1da177e4 7179 }
51888ca2 7180
2109b99e
AH
7181 d.sched_group_nodes = NULL; /* don't free this we still need it */
7182 __free_domain_allocs(&d, sa_tmpmask, cpu_map);
7183 return 0;
51888ca2 7184
51888ca2 7185error:
2109b99e
AH
7186 __free_domain_allocs(&d, alloc_state, cpu_map);
7187 return -ENOMEM;
1da177e4 7188}
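/*
 * For each cpu, the per-cpu loop in __build_sched_domains() above chains the
 * domains produced by the __build_*_sched_domain() helpers into a
 * parent/child hierarchy.  A sketch, assuming NUMA, CONFIG_SCHED_MC and
 * CONFIG_SCHED_SMT are all enabled (fewer levels exist otherwise):
 *
 *	ALLNODES -> NODE -> CPU (physical package) -> MC (core) -> SIBLING (SMT)
 *
 * cpu_attach_domain() then attaches each cpu to the lowest level present.
 */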
029190c5 7189
96f874e2 7190static int build_sched_domains(const struct cpumask *cpu_map)
1d3504fc
HS
7191{
7192 return __build_sched_domains(cpu_map, NULL);
7193}
7194
acc3f5d7 7195static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 7196static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
7197static struct sched_domain_attr *dattr_cur;
7198 /* attributes of custom domains in 'doms_cur' */
029190c5
PJ
7199
7200/*
7201 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
7202 * cpumask) fails, then fallback to a single sched domain,
7203 * as determined by the single cpumask fallback_doms.
029190c5 7204 */
4212823f 7205static cpumask_var_t fallback_doms;
029190c5 7206
ee79d1bd
HC
7207/*
7208 * arch_update_cpu_topology lets virtualized architectures update the
7209 * cpu core maps. It is supposed to return 1 if the topology changed
7210 * or 0 if it stayed the same.
7211 */
7212int __attribute__((weak)) arch_update_cpu_topology(void)
22e52b07 7213{
ee79d1bd 7214 return 0;
22e52b07
HC
7215}
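/*
 * An architecture (or virtualization layer) supplies a strong definition to
 * override the weak stub above.  A minimal hypothetical sketch; the helper
 * name below is invented purely for illustration and is not a real kernel
 * interface:
 *
 *	int arch_update_cpu_topology(void)
 *	{
 *		return hypervisor_core_maps_changed() ? 1 : 0;
 *	}
 */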
7216
acc3f5d7
RR
7217cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7218{
7219 int i;
7220 cpumask_var_t *doms;
7221
7222 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7223 if (!doms)
7224 return NULL;
7225 for (i = 0; i < ndoms; i++) {
7226 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7227 free_sched_domains(doms, i);
7228 return NULL;
7229 }
7230 }
7231 return doms;
7232}
7233
7234void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7235{
7236 unsigned int i;
7237 for (i = 0; i < ndoms; i++)
7238 free_cpumask_var(doms[i]);
7239 kfree(doms);
7240}
7241
1a20ff27 7242/*
41a2d6cf 7243 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
7244 * For now this just excludes isolated cpus, but could be used to
7245 * exclude other special cases in the future.
1a20ff27 7246 */
96f874e2 7247static int arch_init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 7248{
7378547f
MM
7249 int err;
7250
22e52b07 7251 arch_update_cpu_topology();
029190c5 7252 ndoms_cur = 1;
acc3f5d7 7253 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 7254 if (!doms_cur)
acc3f5d7
RR
7255 doms_cur = &fallback_doms;
7256 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
1d3504fc 7257 dattr_cur = NULL;
acc3f5d7 7258 err = build_sched_domains(doms_cur[0]);
6382bc90 7259 register_sched_domain_sysctl();
7378547f
MM
7260
7261 return err;
1a20ff27
DG
7262}
7263
96f874e2
RR
7264static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7265 struct cpumask *tmpmask)
1da177e4 7266{
7c16ec58 7267 free_sched_groups(cpu_map, tmpmask);
9c1cfda2 7268}
1da177e4 7269
1a20ff27
DG
7270/*
7271 * Detach sched domains from a group of cpus specified in cpu_map
7272 * These cpus will now be attached to the NULL domain
7273 */
96f874e2 7274static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27 7275{
96f874e2
RR
7276 /* Save because hotplug lock held. */
7277 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
1a20ff27
DG
7278 int i;
7279
abcd083a 7280 for_each_cpu(i, cpu_map)
57d885fe 7281 cpu_attach_domain(NULL, &def_root_domain, i);
1a20ff27 7282 synchronize_sched();
96f874e2 7283 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
1a20ff27
DG
7284}
7285
1d3504fc
HS
7286/* handle null as "default" */
7287static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7288 struct sched_domain_attr *new, int idx_new)
7289{
7290 struct sched_domain_attr tmp;
7291
7292 /* fast path */
7293 if (!new && !cur)
7294 return 1;
7295
7296 tmp = SD_ATTR_INIT;
7297 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7298 new ? (new + idx_new) : &tmp,
7299 sizeof(struct sched_domain_attr));
7300}
7301
029190c5
PJ
7302/*
7303 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 7304 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
7305 * doms_new[] to the current sched domain partitioning, doms_cur[].
7306 * It destroys each deleted domain and builds each new domain.
7307 *
acc3f5d7 7308 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
7309 * The masks don't intersect (don't overlap.) We should set up one
7310 * sched domain for each mask. CPUs not in any of the cpumasks will
7311 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
7312 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7313 * it as it is.
7314 *
acc3f5d7
RR
7315 * The passed in 'doms_new' should be allocated using
7316 * alloc_sched_domains. This routine takes ownership of it and will
7317 * free_sched_domains it when done with it. If the caller failed the
7318 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
7319 * and partition_sched_domains() will fall back to the single partition
7320 * 'fallback_doms'; this also forces the domains to be rebuilt.
029190c5 7321 *
96f874e2 7322 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
7323 * ndoms_new == 0 is a special case for destroying existing domains,
7324 * and it will not create the default domain.
dfb512ec 7325 *
029190c5
PJ
7326 * Call with hotplug lock held
7327 */
acc3f5d7 7328void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 7329 struct sched_domain_attr *dattr_new)
029190c5 7330{
dfb512ec 7331 int i, j, n;
d65bd5ec 7332 int new_topology;
029190c5 7333
712555ee 7334 mutex_lock(&sched_domains_mutex);
a1835615 7335
7378547f
MM
7336 /* always unregister in case we don't destroy any domains */
7337 unregister_sched_domain_sysctl();
7338
d65bd5ec
HC
7339 /* Let architecture update cpu core mappings. */
7340 new_topology = arch_update_cpu_topology();
7341
dfb512ec 7342 n = doms_new ? ndoms_new : 0;
029190c5
PJ
7343
7344 /* Destroy deleted domains */
7345 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 7346 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7347 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 7348 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
7349 goto match1;
7350 }
7351 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 7352 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
7353match1:
7354 ;
7355 }
7356
e761b772
MK
7357 if (doms_new == NULL) {
7358 ndoms_cur = 0;
acc3f5d7 7359 doms_new = &fallback_doms;
6ad4c188 7360 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 7361 WARN_ON_ONCE(dattr_new);
e761b772
MK
7362 }
7363
029190c5
PJ
7364 /* Build new domains */
7365 for (i = 0; i < ndoms_new; i++) {
d65bd5ec 7366 for (j = 0; j < ndoms_cur && !new_topology; j++) {
acc3f5d7 7367 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 7368 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
7369 goto match2;
7370 }
7371 /* no match - add a new doms_new */
acc3f5d7 7372 __build_sched_domains(doms_new[i],
1d3504fc 7373 dattr_new ? dattr_new + i : NULL);
029190c5
PJ
7374match2:
7375 ;
7376 }
7377
7378 /* Remember the new sched domains */
acc3f5d7
RR
7379 if (doms_cur != &fallback_doms)
7380 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 7381 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 7382 doms_cur = doms_new;
1d3504fc 7383 dattr_cur = dattr_new;
029190c5 7384 ndoms_cur = ndoms_new;
7378547f
MM
7385
7386 register_sched_domain_sysctl();
a1835615 7387
712555ee 7388 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
7389}
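/*
 * An illustrative sketch of a caller of the interface documented above (not
 * a function from this file; "isolated_mask" is an assumption made only for
 * the example).  The array is allocated with alloc_sched_domains() and
 * ownership passes to partition_sched_domains(), which frees it on the next
 * repartition; the hotplug lock is taken as the comment above requires:
 */
#if 0	/* example only */
static void example_repartition(const struct cpumask *isolated_mask)
{
	cpumask_var_t *doms = alloc_sched_domains(1);

	get_online_cpus();
	if (!doms) {
		/* allocation failed: fall back to the single fallback domain */
		partition_sched_domains(1, NULL, NULL);
	} else {
		cpumask_andnot(doms[0], cpu_active_mask, isolated_mask);
		/* takes ownership of doms and frees it later */
		partition_sched_domains(1, doms, NULL);
	}
	put_online_cpus();
}
#endif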
7390
5c45bf27 7391#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
c70f22d2 7392static void arch_reinit_sched_domains(void)
5c45bf27 7393{
95402b38 7394 get_online_cpus();
dfb512ec
MK
7395
7396 /* Destroy domains first to force the rebuild */
7397 partition_sched_domains(0, NULL, NULL);
7398
e761b772 7399 rebuild_sched_domains();
95402b38 7400 put_online_cpus();
5c45bf27
SS
7401}
7402
7403static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7404{
afb8a9b7 7405 unsigned int level = 0;
5c45bf27 7406
afb8a9b7
GS
7407 if (sscanf(buf, "%u", &level) != 1)
7408 return -EINVAL;
7409
7410 /*
7411 * level is always positive so don't check for
7412 * level < POWERSAVINGS_BALANCE_NONE which is 0
7413 * What happens on 0 or 1 byte write,
7414 * need to check for count as well?
7415 */
7416
7417 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
5c45bf27
SS
7418 return -EINVAL;
7419
7420 if (smt)
afb8a9b7 7421 sched_smt_power_savings = level;
5c45bf27 7422 else
afb8a9b7 7423 sched_mc_power_savings = level;
5c45bf27 7424
c70f22d2 7425 arch_reinit_sched_domains();
5c45bf27 7426
c70f22d2 7427 return count;
5c45bf27
SS
7428}
7429
5c45bf27 7430#ifdef CONFIG_SCHED_MC
f718cd4a 7431static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
c9be0a36 7432 struct sysdev_class_attribute *attr,
f718cd4a 7433 char *page)
5c45bf27
SS
7434{
7435 return sprintf(page, "%u\n", sched_mc_power_savings);
7436}
f718cd4a 7437static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
c9be0a36 7438 struct sysdev_class_attribute *attr,
48f24c4d 7439 const char *buf, size_t count)
5c45bf27
SS
7440{
7441 return sched_power_savings_store(buf, count, 0);
7442}
f718cd4a
AK
7443static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7444 sched_mc_power_savings_show,
7445 sched_mc_power_savings_store);
5c45bf27
SS
7446#endif
7447
7448#ifdef CONFIG_SCHED_SMT
f718cd4a 7449static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
c9be0a36 7450 struct sysdev_class_attribute *attr,
f718cd4a 7451 char *page)
5c45bf27
SS
7452{
7453 return sprintf(page, "%u\n", sched_smt_power_savings);
7454}
f718cd4a 7455static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
c9be0a36 7456 struct sysdev_class_attribute *attr,
48f24c4d 7457 const char *buf, size_t count)
5c45bf27
SS
7458{
7459 return sched_power_savings_store(buf, count, 1);
7460}
f718cd4a
AK
7461static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7462 sched_smt_power_savings_show,
6707de00
AB
7463 sched_smt_power_savings_store);
7464#endif
7465
39aac648 7466int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6707de00
AB
7467{
7468 int err = 0;
7469
7470#ifdef CONFIG_SCHED_SMT
7471 if (smt_capable())
7472 err = sysfs_create_file(&cls->kset.kobj,
7473 &attr_sched_smt_power_savings.attr);
7474#endif
7475#ifdef CONFIG_SCHED_MC
7476 if (!err && mc_capable())
7477 err = sysfs_create_file(&cls->kset.kobj,
7478 &attr_sched_mc_power_savings.attr);
7479#endif
7480 return err;
7481}
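/*
 * The attributes registered here hang off the cpu sysdev class, so with the
 * usual sysfs mount they appear as (paths assumed for illustration):
 *
 *	/sys/devices/system/cpu/sched_mc_power_savings
 *	/sys/devices/system/cpu/sched_smt_power_savings
 *
 * e.g. "echo 1 > /sys/devices/system/cpu/sched_mc_power_savings" reaches
 * sched_power_savings_store() above with smt == 0 and level == 1.
 */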
6d6bc0ad 7482#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
5c45bf27 7483
1da177e4 7484/*
3a101d05
TH
7485 * Update cpusets according to cpu_active mask. If cpusets are
7486 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7487 * around partition_sched_domains().
1da177e4 7488 */
0b2e918a
TH
7489static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7490 void *hcpu)
e761b772 7491{
3a101d05 7492 switch (action & ~CPU_TASKS_FROZEN) {
e761b772 7493 case CPU_ONLINE:
6ad4c188 7494 case CPU_DOWN_FAILED:
3a101d05 7495 cpuset_update_active_cpus();
e761b772 7496 return NOTIFY_OK;
3a101d05
TH
7497 default:
7498 return NOTIFY_DONE;
7499 }
7500}
e761b772 7501
0b2e918a
TH
7502static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7503 void *hcpu)
3a101d05
TH
7504{
7505 switch (action & ~CPU_TASKS_FROZEN) {
7506 case CPU_DOWN_PREPARE:
7507 cpuset_update_active_cpus();
7508 return NOTIFY_OK;
e761b772
MK
7509 default:
7510 return NOTIFY_DONE;
7511 }
7512}
e761b772
MK
7513
7514static int update_runtime(struct notifier_block *nfb,
7515 unsigned long action, void *hcpu)
1da177e4 7516{
7def2be1
PZ
7517 int cpu = (int)(long)hcpu;
7518
1da177e4 7519 switch (action) {
1da177e4 7520 case CPU_DOWN_PREPARE:
8bb78442 7521 case CPU_DOWN_PREPARE_FROZEN:
7def2be1 7522 disable_runtime(cpu_rq(cpu));
1da177e4
LT
7523 return NOTIFY_OK;
7524
1da177e4 7525 case CPU_DOWN_FAILED:
8bb78442 7526 case CPU_DOWN_FAILED_FROZEN:
1da177e4 7527 case CPU_ONLINE:
8bb78442 7528 case CPU_ONLINE_FROZEN:
7def2be1 7529 enable_runtime(cpu_rq(cpu));
e761b772
MK
7530 return NOTIFY_OK;
7531
1da177e4
LT
7532 default:
7533 return NOTIFY_DONE;
7534 }
1da177e4 7535}
1da177e4
LT
7536
7537void __init sched_init_smp(void)
7538{
dcc30a35
RR
7539 cpumask_var_t non_isolated_cpus;
7540
7541 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 7542 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 7543
434d53b0
MT
7544#if defined(CONFIG_NUMA)
7545 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
7546 GFP_KERNEL);
7547 BUG_ON(sched_group_nodes_bycpu == NULL);
7548#endif
95402b38 7549 get_online_cpus();
712555ee 7550 mutex_lock(&sched_domains_mutex);
6ad4c188 7551 arch_init_sched_domains(cpu_active_mask);
dcc30a35
RR
7552 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7553 if (cpumask_empty(non_isolated_cpus))
7554 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 7555 mutex_unlock(&sched_domains_mutex);
95402b38 7556 put_online_cpus();
e761b772 7557
3a101d05
TH
7558 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7559 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772
MK
7560
7561 /* RT runtime code needs to handle some hotplug events */
7562 hotcpu_notifier(update_runtime, 0);
7563
b328ca18 7564 init_hrtick();
5c1e1767
NP
7565
7566 /* Move init over to a non-isolated CPU */
dcc30a35 7567 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 7568 BUG();
19978ca6 7569 sched_init_granularity();
dcc30a35 7570 free_cpumask_var(non_isolated_cpus);
4212823f 7571
0e3900e6 7572 init_sched_rt_class();
1da177e4
LT
7573}
7574#else
7575void __init sched_init_smp(void)
7576{
19978ca6 7577 sched_init_granularity();
1da177e4
LT
7578}
7579#endif /* CONFIG_SMP */
7580
cd1bb94b
AB
7581const_debug unsigned int sysctl_timer_migration = 1;
7582
1da177e4
LT
7583int in_sched_functions(unsigned long addr)
7584{
1da177e4
LT
7585 return in_lock_functions(addr) ||
7586 (addr >= (unsigned long)__sched_text_start
7587 && addr < (unsigned long)__sched_text_end);
7588}
7589
a9957449 7590static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
dd41f596
IM
7591{
7592 cfs_rq->tasks_timeline = RB_ROOT;
4a55bd5e 7593 INIT_LIST_HEAD(&cfs_rq->tasks);
dd41f596
IM
7594#ifdef CONFIG_FAIR_GROUP_SCHED
7595 cfs_rq->rq = rq;
7596#endif
67e9fb2a 7597 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
dd41f596
IM
7598}
7599
fa85ae24
PZ
7600static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7601{
7602 struct rt_prio_array *array;
7603 int i;
7604
7605 array = &rt_rq->active;
7606 for (i = 0; i < MAX_RT_PRIO; i++) {
7607 INIT_LIST_HEAD(array->queue + i);
7608 __clear_bit(i, array->bitmap);
7609 }
7610 /* delimiter for bitsearch: */
7611 __set_bit(MAX_RT_PRIO, array->bitmap);
7612
052f1dc7 7613#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
e864c499 7614 rt_rq->highest_prio.curr = MAX_RT_PRIO;
398a153b 7615#ifdef CONFIG_SMP
e864c499 7616 rt_rq->highest_prio.next = MAX_RT_PRIO;
48d5e258 7617#endif
48d5e258 7618#endif
fa85ae24
PZ
7619#ifdef CONFIG_SMP
7620 rt_rq->rt_nr_migratory = 0;
fa85ae24 7621 rt_rq->overloaded = 0;
05fa785c 7622 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
fa85ae24
PZ
7623#endif
7624
7625 rt_rq->rt_time = 0;
7626 rt_rq->rt_throttled = 0;
ac086bc2 7627 rt_rq->rt_runtime = 0;
0986b11b 7628 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
6f505b16 7629
052f1dc7 7630#ifdef CONFIG_RT_GROUP_SCHED
23b0fdfc 7631 rt_rq->rt_nr_boosted = 0;
6f505b16
PZ
7632 rt_rq->rq = rq;
7633#endif
fa85ae24
PZ
7634}
7635
6f505b16 7636#ifdef CONFIG_FAIR_GROUP_SCHED
ec7dc8ac
DG
7637static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7638 struct sched_entity *se, int cpu, int add,
7639 struct sched_entity *parent)
6f505b16 7640{
ec7dc8ac 7641 struct rq *rq = cpu_rq(cpu);
6f505b16
PZ
7642 tg->cfs_rq[cpu] = cfs_rq;
7643 init_cfs_rq(cfs_rq, rq);
7644 cfs_rq->tg = tg;
7645 if (add)
7646 list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
7647
7648 tg->se[cpu] = se;
354d60c2
DG
7649 /* se could be NULL for init_task_group */
7650 if (!se)
7651 return;
7652
ec7dc8ac
DG
7653 if (!parent)
7654 se->cfs_rq = &rq->cfs;
7655 else
7656 se->cfs_rq = parent->my_q;
7657
6f505b16
PZ
7658 se->my_q = cfs_rq;
7659 se->load.weight = tg->shares;
e05510d0 7660 se->load.inv_weight = 0;
ec7dc8ac 7661 se->parent = parent;
6f505b16 7662}
052f1dc7 7663#endif
6f505b16 7664
052f1dc7 7665#ifdef CONFIG_RT_GROUP_SCHED
ec7dc8ac
DG
7666static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
7667 struct sched_rt_entity *rt_se, int cpu, int add,
7668 struct sched_rt_entity *parent)
6f505b16 7669{
ec7dc8ac
DG
7670 struct rq *rq = cpu_rq(cpu);
7671
6f505b16
PZ
7672 tg->rt_rq[cpu] = rt_rq;
7673 init_rt_rq(rt_rq, rq);
7674 rt_rq->tg = tg;
ac086bc2 7675 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
6f505b16
PZ
7676 if (add)
7677 list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
7678
7679 tg->rt_se[cpu] = rt_se;
354d60c2
DG
7680 if (!rt_se)
7681 return;
7682
ec7dc8ac
DG
7683 if (!parent)
7684 rt_se->rt_rq = &rq->rt;
7685 else
7686 rt_se->rt_rq = parent->my_q;
7687
6f505b16 7688 rt_se->my_q = rt_rq;
ec7dc8ac 7689 rt_se->parent = parent;
6f505b16
PZ
7690 INIT_LIST_HEAD(&rt_se->run_list);
7691}
7692#endif
7693
1da177e4
LT
7694void __init sched_init(void)
7695{
dd41f596 7696 int i, j;
434d53b0
MT
7697 unsigned long alloc_size = 0, ptr;
7698
7699#ifdef CONFIG_FAIR_GROUP_SCHED
7700 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7701#endif
7702#ifdef CONFIG_RT_GROUP_SCHED
7703 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
eff766a6 7704#endif
df7c8e84 7705#ifdef CONFIG_CPUMASK_OFFSTACK
8c083f08 7706 alloc_size += num_possible_cpus() * cpumask_size();
434d53b0 7707#endif
434d53b0 7708 if (alloc_size) {
36b7b6d4 7709 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
7710
7711#ifdef CONFIG_FAIR_GROUP_SCHED
7712 init_task_group.se = (struct sched_entity **)ptr;
7713 ptr += nr_cpu_ids * sizeof(void **);
7714
7715 init_task_group.cfs_rq = (struct cfs_rq **)ptr;
7716 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 7717
6d6bc0ad 7718#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0
MT
7719#ifdef CONFIG_RT_GROUP_SCHED
7720 init_task_group.rt_se = (struct sched_rt_entity **)ptr;
7721 ptr += nr_cpu_ids * sizeof(void **);
7722
7723 init_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
7724 ptr += nr_cpu_ids * sizeof(void **);
7725
6d6bc0ad 7726#endif /* CONFIG_RT_GROUP_SCHED */
df7c8e84
RR
7727#ifdef CONFIG_CPUMASK_OFFSTACK
7728 for_each_possible_cpu(i) {
7729 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
7730 ptr += cpumask_size();
7731 }
7732#endif /* CONFIG_CPUMASK_OFFSTACK */
434d53b0 7733 }
dd41f596 7734
57d885fe
GH
7735#ifdef CONFIG_SMP
7736 init_defrootdomain();
7737#endif
7738
d0b27fa7
PZ
7739 init_rt_bandwidth(&def_rt_bandwidth,
7740 global_rt_period(), global_rt_runtime());
7741
7742#ifdef CONFIG_RT_GROUP_SCHED
7743 init_rt_bandwidth(&init_task_group.rt_bandwidth,
7744 global_rt_period(), global_rt_runtime());
6d6bc0ad 7745#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7746
7c941438 7747#ifdef CONFIG_CGROUP_SCHED
6f505b16 7748 list_add(&init_task_group.list, &task_groups);
f473aa5e
PZ
7749 INIT_LIST_HEAD(&init_task_group.children);
7750
7c941438 7751#endif /* CONFIG_CGROUP_SCHED */
6f505b16 7752
4a6cc4bd
JK
7753#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
7754 update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
7755 __alignof__(unsigned long));
7756#endif
0a945022 7757 for_each_possible_cpu(i) {
70b97a7f 7758 struct rq *rq;
1da177e4
LT
7759
7760 rq = cpu_rq(i);
05fa785c 7761 raw_spin_lock_init(&rq->lock);
7897986b 7762 rq->nr_running = 0;
dce48a84
TG
7763 rq->calc_load_active = 0;
7764 rq->calc_load_update = jiffies + LOAD_FREQ;
dd41f596 7765 init_cfs_rq(&rq->cfs, rq);
6f505b16 7766 init_rt_rq(&rq->rt, rq);
dd41f596 7767#ifdef CONFIG_FAIR_GROUP_SCHED
4cf86d77 7768 init_task_group.shares = init_task_group_load;
6f505b16 7769 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2
DG
7770#ifdef CONFIG_CGROUP_SCHED
7771 /*
7772 * How much cpu bandwidth does init_task_group get?
7773 *
7774 * In case of task-groups formed through the cgroup filesystem, it
7775 * gets 100% of the cpu resources in the system. This overall
7776 * system cpu resource is divided among the tasks of
7777 * init_task_group and its child task-groups in a fair manner,
7778 * based on each entity's (task or task-group's) weight
7779 * (se->load.weight).
7780 *
7781 * In other words, if init_task_group has 10 tasks (each of weight
7782 * 1024) and two child groups A0 and A1 (of weight 1024 each),
7783 * then A0's share of the cpu resource is:
7784 *
0d905bca 7785 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2
DG
7786 *
7787 * We achieve this by letting init_task_group's tasks sit
7788 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
7789 */
ec7dc8ac 7790 init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
052f1dc7 7791#endif
354d60c2
DG
7792#endif /* CONFIG_FAIR_GROUP_SCHED */
7793
7794 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 7795#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 7796 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
354d60c2 7797#ifdef CONFIG_CGROUP_SCHED
ec7dc8ac 7798 init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
354d60c2 7799#endif
dd41f596 7800#endif
1da177e4 7801
dd41f596
IM
7802 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7803 rq->cpu_load[j] = 0;
fdf3e95d
VP
7804
7805 rq->last_load_update_tick = jiffies;
7806
1da177e4 7807#ifdef CONFIG_SMP
41c7ce9a 7808 rq->sd = NULL;
57d885fe 7809 rq->rd = NULL;
e51fd5e2 7810 rq->cpu_power = SCHED_LOAD_SCALE;
3f029d3c 7811 rq->post_schedule = 0;
1da177e4 7812 rq->active_balance = 0;
dd41f596 7813 rq->next_balance = jiffies;
1da177e4 7814 rq->push_cpu = 0;
0a2966b4 7815 rq->cpu = i;
1f11eb6a 7816 rq->online = 0;
eae0c9df
MG
7817 rq->idle_stamp = 0;
7818 rq->avg_idle = 2*sysctl_sched_migration_cost;
dc938520 7819 rq_attach_root(rq, &def_root_domain);
83cd4fe2
VP
7820#ifdef CONFIG_NO_HZ
7821 rq->nohz_balance_kick = 0;
7822 init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
7823#endif
1da177e4 7824#endif
8f4d37ec 7825 init_rq_hrtick(rq);
1da177e4 7826 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
7827 }
7828
2dd73a4f 7829 set_load_weight(&init_task);
b50f60ce 7830
e107be36
AK
7831#ifdef CONFIG_PREEMPT_NOTIFIERS
7832 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7833#endif
7834
c9819f45 7835#ifdef CONFIG_SMP
962cf36c 7836 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
c9819f45
CL
7837#endif
7838
b50f60ce 7839#ifdef CONFIG_RT_MUTEXES
1d615482 7840 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
b50f60ce
HC
7841#endif
7842
1da177e4
LT
7843 /*
7844 * The boot idle thread does lazy MMU switching as well:
7845 */
7846 atomic_inc(&init_mm.mm_count);
7847 enter_lazy_tlb(&init_mm, current);
7848
7849 /*
7850 * Make us the idle thread. Technically, schedule() should not be
7851 * called from this thread, however somewhere below it might be,
7852 * but because we are the idle thread, we just pick up running again
7853 * when this runqueue becomes "idle".
7854 */
7855 init_idle(current, smp_processor_id());
dce48a84
TG
7856
7857 calc_load_update = jiffies + LOAD_FREQ;
7858
dd41f596
IM
7859 /*
7860 * During early bootup we pretend to be a normal task:
7861 */
7862 current->sched_class = &fair_sched_class;
6892b75e 7863
6a7b3dc3 7864 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
49557e62 7865 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
bf4d83f6 7866#ifdef CONFIG_SMP
7d1e6a9b 7867#ifdef CONFIG_NO_HZ
83cd4fe2
VP
7868 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
7869 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
7870 atomic_set(&nohz.load_balancer, nr_cpu_ids);
7871 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
7872 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
7d1e6a9b 7873#endif
bdddd296
RR
7874 /* May be allocated at isolcpus cmdline parse time */
7875 if (cpu_isolated_map == NULL)
7876 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
bf4d83f6 7877#endif /* SMP */
6a7b3dc3 7878
cdd6c482 7879 perf_event_init();
0d905bca 7880
6892b75e 7881 scheduler_running = 1;
1da177e4
LT
7882}
7883
7884#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
e4aafea2
FW
7885static inline int preempt_count_equals(int preempt_offset)
7886{
234da7bc 7887 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2
FW
7888
7889 return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
7890}
7891
d894837f 7892void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 7893{
48f24c4d 7894#ifdef in_atomic
1da177e4
LT
7895 static unsigned long prev_jiffy; /* ratelimiting */
7896
e4aafea2
FW
7897 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
7898 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
7899 return;
7900 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
7901 return;
7902 prev_jiffy = jiffies;
7903
3df0fc5b
PZ
7904 printk(KERN_ERR
7905 "BUG: sleeping function called from invalid context at %s:%d\n",
7906 file, line);
7907 printk(KERN_ERR
7908 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
7909 in_atomic(), irqs_disabled(),
7910 current->pid, current->comm);
aef745fc
IM
7911
7912 debug_show_held_locks(current);
7913 if (irqs_disabled())
7914 print_irqtrace_events(current);
7915 dump_stack();
1da177e4
LT
7916#endif
7917}
7918EXPORT_SYMBOL(__might_sleep);
7919#endif
7920
7921#ifdef CONFIG_MAGIC_SYSRQ
3a5e4dc1
AK
7922static void normalize_task(struct rq *rq, struct task_struct *p)
7923{
7924 int on_rq;
3e51f33f 7925
3a5e4dc1
AK
7926 on_rq = p->se.on_rq;
7927 if (on_rq)
7928 deactivate_task(rq, p, 0);
7929 __setscheduler(rq, p, SCHED_NORMAL, 0);
7930 if (on_rq) {
7931 activate_task(rq, p, 0);
7932 resched_task(rq->curr);
7933 }
7934}
7935
1da177e4
LT
7936void normalize_rt_tasks(void)
7937{
a0f98a1c 7938 struct task_struct *g, *p;
1da177e4 7939 unsigned long flags;
70b97a7f 7940 struct rq *rq;
1da177e4 7941
4cf5d77a 7942 read_lock_irqsave(&tasklist_lock, flags);
a0f98a1c 7943 do_each_thread(g, p) {
178be793
IM
7944 /*
7945 * Only normalize user tasks:
7946 */
7947 if (!p->mm)
7948 continue;
7949
6cfb0d5d 7950 p->se.exec_start = 0;
6cfb0d5d 7951#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
7952 p->se.statistics.wait_start = 0;
7953 p->se.statistics.sleep_start = 0;
7954 p->se.statistics.block_start = 0;
6cfb0d5d 7955#endif
dd41f596
IM
7956
7957 if (!rt_task(p)) {
7958 /*
7959 * Renice negative nice level userspace
7960 * tasks back to 0:
7961 */
7962 if (TASK_NICE(p) < 0 && p->mm)
7963 set_user_nice(p, 0);
1da177e4 7964 continue;
dd41f596 7965 }
1da177e4 7966
1d615482 7967 raw_spin_lock(&p->pi_lock);
b29739f9 7968 rq = __task_rq_lock(p);
1da177e4 7969
178be793 7970 normalize_task(rq, p);
3a5e4dc1 7971
b29739f9 7972 __task_rq_unlock(rq);
1d615482 7973 raw_spin_unlock(&p->pi_lock);
a0f98a1c
IM
7974 } while_each_thread(g, p);
7975
4cf5d77a 7976 read_unlock_irqrestore(&tasklist_lock, flags);
1da177e4
LT
7977}
7978
7979#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 7980
67fc4e0c 7981#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 7982/*
67fc4e0c 7983 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
7984 *
7985 * They can only be called when the whole system has been
7986 * stopped - every CPU needs to be quiescent, and no scheduling
7987 * activity can take place. Using them for anything else would
7988 * be a serious bug, and as a result, they aren't even visible
7989 * under any other configuration.
7990 */
7991
7992/**
7993 * curr_task - return the current task for a given cpu.
7994 * @cpu: the processor in question.
7995 *
7996 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7997 */
36c8b586 7998struct task_struct *curr_task(int cpu)
1df5c10a
LT
7999{
8000 return cpu_curr(cpu);
8001}
8002
67fc4e0c
JW
8003#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8004
8005#ifdef CONFIG_IA64
1df5c10a
LT
8006/**
8007 * set_curr_task - set the current task for a given cpu.
8008 * @cpu: the processor in question.
8009 * @p: the task pointer to set.
8010 *
8011 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
8012 * are serviced on a separate stack. It allows the architecture to switch the
8013 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
8014 * must be called with all CPU's synchronized, and interrupts disabled, and
8015 * the caller must save the original value of the current task (see
8016 * curr_task() above) and restore that value before reenabling interrupts and
8017 * re-starting the system.
8018 *
8019 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8020 */
36c8b586 8021void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
8022{
8023 cpu_curr(cpu) = p;
8024}
8025
8026#endif
29f59db3 8027
bccbe08a
PZ
8028#ifdef CONFIG_FAIR_GROUP_SCHED
8029static void free_fair_sched_group(struct task_group *tg)
6f505b16
PZ
8030{
8031 int i;
8032
8033 for_each_possible_cpu(i) {
8034 if (tg->cfs_rq)
8035 kfree(tg->cfs_rq[i]);
8036 if (tg->se)
8037 kfree(tg->se[i]);
6f505b16
PZ
8038 }
8039
8040 kfree(tg->cfs_rq);
8041 kfree(tg->se);
6f505b16
PZ
8042}
8043
ec7dc8ac
DG
8044static
8045int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
29f59db3 8046{
29f59db3 8047 struct cfs_rq *cfs_rq;
eab17229 8048 struct sched_entity *se;
9b5b7751 8049 struct rq *rq;
29f59db3
SV
8050 int i;
8051
434d53b0 8052 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
29f59db3
SV
8053 if (!tg->cfs_rq)
8054 goto err;
434d53b0 8055 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
29f59db3
SV
8056 if (!tg->se)
8057 goto err;
052f1dc7
PZ
8058
8059 tg->shares = NICE_0_LOAD;
29f59db3
SV
8060
8061 for_each_possible_cpu(i) {
9b5b7751 8062 rq = cpu_rq(i);
29f59db3 8063
eab17229
LZ
8064 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8065 GFP_KERNEL, cpu_to_node(i));
29f59db3
SV
8066 if (!cfs_rq)
8067 goto err;
8068
eab17229
LZ
8069 se = kzalloc_node(sizeof(struct sched_entity),
8070 GFP_KERNEL, cpu_to_node(i));
29f59db3 8071 if (!se)
dfc12eb2 8072 goto err_free_rq;
29f59db3 8073
eab17229 8074 init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
bccbe08a
PZ
8075 }
8076
8077 return 1;
8078
dfc12eb2
PC
8079 err_free_rq:
8080 kfree(cfs_rq);
bccbe08a
PZ
8081 err:
8082 return 0;
8083}
8084
8085static inline void register_fair_sched_group(struct task_group *tg, int cpu)
8086{
8087 list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
8088 &cpu_rq(cpu)->leaf_cfs_rq_list);
8089}
8090
8091static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8092{
8093 list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
8094}
6d6bc0ad 8095#else /* !CONFIG_FAIR_GROUP_SCHED */
bccbe08a
PZ
8096static inline void free_fair_sched_group(struct task_group *tg)
8097{
8098}
8099
ec7dc8ac
DG
8100static inline
8101int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
8102{
8103 return 1;
8104}
8105
8106static inline void register_fair_sched_group(struct task_group *tg, int cpu)
8107{
8108}
8109
8110static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8111{
8112}
6d6bc0ad 8113#endif /* CONFIG_FAIR_GROUP_SCHED */
052f1dc7
PZ
8114
8115#ifdef CONFIG_RT_GROUP_SCHED
bccbe08a
PZ
8116static void free_rt_sched_group(struct task_group *tg)
8117{
8118 int i;
8119
d0b27fa7
PZ
8120 destroy_rt_bandwidth(&tg->rt_bandwidth);
8121
bccbe08a
PZ
8122 for_each_possible_cpu(i) {
8123 if (tg->rt_rq)
8124 kfree(tg->rt_rq[i]);
8125 if (tg->rt_se)
8126 kfree(tg->rt_se[i]);
8127 }
8128
8129 kfree(tg->rt_rq);
8130 kfree(tg->rt_se);
8131}
8132
ec7dc8ac
DG
8133static
8134int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
8135{
8136 struct rt_rq *rt_rq;
eab17229 8137 struct sched_rt_entity *rt_se;
bccbe08a
PZ
8138 struct rq *rq;
8139 int i;
8140
434d53b0 8141 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
bccbe08a
PZ
8142 if (!tg->rt_rq)
8143 goto err;
434d53b0 8144 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
bccbe08a
PZ
8145 if (!tg->rt_se)
8146 goto err;
8147
d0b27fa7
PZ
8148 init_rt_bandwidth(&tg->rt_bandwidth,
8149 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
bccbe08a
PZ
8150
8151 for_each_possible_cpu(i) {
8152 rq = cpu_rq(i);
8153
eab17229
LZ
8154 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8155 GFP_KERNEL, cpu_to_node(i));
6f505b16
PZ
8156 if (!rt_rq)
8157 goto err;
29f59db3 8158
eab17229
LZ
8159 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8160 GFP_KERNEL, cpu_to_node(i));
6f505b16 8161 if (!rt_se)
dfc12eb2 8162 goto err_free_rq;
29f59db3 8163
eab17229 8164 init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
29f59db3
SV
8165 }
8166
bccbe08a
PZ
8167 return 1;
8168
dfc12eb2
PC
8169 err_free_rq:
8170 kfree(rt_rq);
bccbe08a
PZ
8171 err:
8172 return 0;
8173}
8174
8175static inline void register_rt_sched_group(struct task_group *tg, int cpu)
8176{
8177 list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
8178 &cpu_rq(cpu)->leaf_rt_rq_list);
8179}
8180
8181static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
8182{
8183 list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
8184}
6d6bc0ad 8185#else /* !CONFIG_RT_GROUP_SCHED */
bccbe08a
PZ
8186static inline void free_rt_sched_group(struct task_group *tg)
8187{
8188}
8189
ec7dc8ac
DG
8190static inline
8191int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
8192{
8193 return 1;
8194}
8195
8196static inline void register_rt_sched_group(struct task_group *tg, int cpu)
8197{
8198}
8199
8200static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
8201{
8202}
6d6bc0ad 8203#endif /* CONFIG_RT_GROUP_SCHED */
bccbe08a 8204
7c941438 8205#ifdef CONFIG_CGROUP_SCHED
bccbe08a
PZ
8206static void free_sched_group(struct task_group *tg)
8207{
8208 free_fair_sched_group(tg);
8209 free_rt_sched_group(tg);
8210 kfree(tg);
8211}
8212
8213/* allocate runqueue etc for a new task group */
ec7dc8ac 8214struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
8215{
8216 struct task_group *tg;
8217 unsigned long flags;
8218 int i;
8219
8220 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8221 if (!tg)
8222 return ERR_PTR(-ENOMEM);
8223
ec7dc8ac 8224 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
8225 goto err;
8226
ec7dc8ac 8227 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
8228 goto err;
8229
8ed36996 8230 spin_lock_irqsave(&task_group_lock, flags);
9b5b7751 8231 for_each_possible_cpu(i) {
bccbe08a
PZ
8232 register_fair_sched_group(tg, i);
8233 register_rt_sched_group(tg, i);
9b5b7751 8234 }
6f505b16 8235 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
8236
8237 WARN_ON(!parent); /* root should already exist */
8238
8239 tg->parent = parent;
f473aa5e 8240 INIT_LIST_HEAD(&tg->children);
09f2724a 8241 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 8242 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3 8243
9b5b7751 8244 return tg;
29f59db3
SV
8245
8246err:
6f505b16 8247 free_sched_group(tg);
29f59db3
SV
8248 return ERR_PTR(-ENOMEM);
8249}
8250
9b5b7751 8251/* rcu callback to free various structures associated with a task group */
6f505b16 8252static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 8253{
29f59db3 8254 /* now it should be safe to free those cfs_rqs */
6f505b16 8255 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
8256}
8257
9b5b7751 8258/* Destroy runqueue etc associated with a task group */
4cf86d77 8259void sched_destroy_group(struct task_group *tg)
29f59db3 8260{
8ed36996 8261 unsigned long flags;
9b5b7751 8262 int i;
29f59db3 8263
8ed36996 8264 spin_lock_irqsave(&task_group_lock, flags);
9b5b7751 8265 for_each_possible_cpu(i) {
bccbe08a
PZ
8266 unregister_fair_sched_group(tg, i);
8267 unregister_rt_sched_group(tg, i);
9b5b7751 8268 }
6f505b16 8269 list_del_rcu(&tg->list);
f473aa5e 8270 list_del_rcu(&tg->siblings);
8ed36996 8271 spin_unlock_irqrestore(&task_group_lock, flags);
9b5b7751 8272
9b5b7751 8273 /* wait for possible concurrent references to cfs_rqs complete */
6f505b16 8274 call_rcu(&tg->rcu, free_sched_group_rcu);
29f59db3
SV
8275}
8276
9b5b7751 8277/* change task's runqueue when it moves between groups.
3a252015
IM
8278 * The caller of this function should have put the task in its new group
8279 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8280 * reflect its new group.
9b5b7751
SV
8281 */
8282void sched_move_task(struct task_struct *tsk)
29f59db3
SV
8283{
8284 int on_rq, running;
8285 unsigned long flags;
8286 struct rq *rq;
8287
8288 rq = task_rq_lock(tsk, &flags);
8289
051a1d1a 8290 running = task_current(rq, tsk);
29f59db3
SV
8291 on_rq = tsk->se.on_rq;
8292
0e1f3483 8293 if (on_rq)
29f59db3 8294 dequeue_task(rq, tsk, 0);
0e1f3483
HS
8295 if (unlikely(running))
8296 tsk->sched_class->put_prev_task(rq, tsk);
29f59db3 8297
6f505b16 8298 set_task_rq(tsk, task_cpu(tsk));
29f59db3 8299
810b3817
PZ
8300#ifdef CONFIG_FAIR_GROUP_SCHED
8301 if (tsk->sched_class->moved_group)
88ec22d3 8302 tsk->sched_class->moved_group(tsk, on_rq);
810b3817
PZ
8303#endif
8304
0e1f3483
HS
8305 if (unlikely(running))
8306 tsk->sched_class->set_curr_task(rq);
8307 if (on_rq)
371fd7e7 8308 enqueue_task(rq, tsk, 0);
29f59db3 8309
29f59db3
SV
8310 task_rq_unlock(rq, &flags);
8311}
7c941438 8312#endif /* CONFIG_CGROUP_SCHED */
29f59db3 8313
052f1dc7 8314#ifdef CONFIG_FAIR_GROUP_SCHED
c09595f6 8315static void __set_se_shares(struct sched_entity *se, unsigned long shares)
29f59db3
SV
8316{
8317 struct cfs_rq *cfs_rq = se->cfs_rq;
29f59db3
SV
8318 int on_rq;
8319
29f59db3 8320 on_rq = se->on_rq;
62fb1851 8321 if (on_rq)
29f59db3
SV
8322 dequeue_entity(cfs_rq, se, 0);
8323
8324 se->load.weight = shares;
e05510d0 8325 se->load.inv_weight = 0;
29f59db3 8326
62fb1851 8327 if (on_rq)
29f59db3 8328 enqueue_entity(cfs_rq, se, 0);
c09595f6 8329}
62fb1851 8330
c09595f6
PZ
8331static void set_se_shares(struct sched_entity *se, unsigned long shares)
8332{
8333 struct cfs_rq *cfs_rq = se->cfs_rq;
8334 struct rq *rq = cfs_rq->rq;
8335 unsigned long flags;
8336
05fa785c 8337 raw_spin_lock_irqsave(&rq->lock, flags);
c09595f6 8338 __set_se_shares(se, shares);
05fa785c 8339 raw_spin_unlock_irqrestore(&rq->lock, flags);
29f59db3
SV
8340}
8341
8ed36996
PZ
8342static DEFINE_MUTEX(shares_mutex);
8343
4cf86d77 8344int sched_group_set_shares(struct task_group *tg, unsigned long shares)
29f59db3
SV
8345{
8346 int i;
8ed36996 8347 unsigned long flags;
c61935fd 8348
ec7dc8ac
DG
8349 /*
8350 * We can't change the weight of the root cgroup.
8351 */
8352 if (!tg->se[0])
8353 return -EINVAL;
8354
18d95a28
PZ
8355 if (shares < MIN_SHARES)
8356 shares = MIN_SHARES;
cb4ad1ff
MX
8357 else if (shares > MAX_SHARES)
8358 shares = MAX_SHARES;
62fb1851 8359
8ed36996 8360 mutex_lock(&shares_mutex);
9b5b7751 8361 if (tg->shares == shares)
5cb350ba 8362 goto done;
29f59db3 8363
8ed36996 8364 spin_lock_irqsave(&task_group_lock, flags);
bccbe08a
PZ
8365 for_each_possible_cpu(i)
8366 unregister_fair_sched_group(tg, i);
f473aa5e 8367 list_del_rcu(&tg->siblings);
8ed36996 8368 spin_unlock_irqrestore(&task_group_lock, flags);
6b2d7700
SV
8369
8370 /* wait for any ongoing reference to this group to finish */
8371 synchronize_sched();
8372
8373 /*
8374 * Now we are free to modify the group's share on each cpu
8375 * w/o tripping rebalance_share or load_balance_fair.
8376 */
9b5b7751 8377 tg->shares = shares;
c09595f6
PZ
8378 for_each_possible_cpu(i) {
8379 /*
8380 * force a rebalance
8381 */
8382 cfs_rq_set_shares(tg->cfs_rq[i], 0);
cb4ad1ff 8383 set_se_shares(tg->se[i], shares);
c09595f6 8384 }
29f59db3 8385
6b2d7700
SV
8386 /*
8387 * Enable load balance activity on this group, by inserting it back on
8388 * each cpu's rq->leaf_cfs_rq_list.
8389 */
8ed36996 8390 spin_lock_irqsave(&task_group_lock, flags);
bccbe08a
PZ
8391 for_each_possible_cpu(i)
8392 register_fair_sched_group(tg, i);
f473aa5e 8393 list_add_rcu(&tg->siblings, &tg->parent->children);
8ed36996 8394 spin_unlock_irqrestore(&task_group_lock, flags);
5cb350ba 8395done:
8ed36996 8396 mutex_unlock(&shares_mutex);
9b5b7751 8397 return 0;
29f59db3
SV
8398}
8399
5cb350ba
DG
8400unsigned long sched_group_shares(struct task_group *tg)
8401{
8402 return tg->shares;
8403}
052f1dc7 8404#endif
5cb350ba 8405
052f1dc7 8406#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8407/*
9f0c1e56 8408 * Ensure that the real time constraints are schedulable.
6f505b16 8409 */
9f0c1e56
PZ
8410static DEFINE_MUTEX(rt_constraints_mutex);
8411
8412static unsigned long to_ratio(u64 period, u64 runtime)
8413{
8414 if (runtime == RUNTIME_INF)
9a7e0b18 8415 return 1ULL << 20;
9f0c1e56 8416
9a7e0b18 8417 return div64_u64(runtime << 20, period);
9f0c1e56
PZ
8418}
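/*
 * Worked example for to_ratio() above (the numbers are an illustration and
 * happen to match the usual 1s period / 0.95s runtime defaults, in ns):
 *
 *	to_ratio(1000000000, 950000000)
 *		= (950000000 << 20) / 1000000000
 *		= 996147	(~0.95, since 1 << 20 == 1048576 represents 1.0)
 */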
8419
9a7e0b18
PZ
8420/* Must be called with tasklist_lock held */
8421static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 8422{
9a7e0b18 8423 struct task_struct *g, *p;
b40b2e8e 8424
9a7e0b18
PZ
8425 do_each_thread(g, p) {
8426 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8427 return 1;
8428 } while_each_thread(g, p);
b40b2e8e 8429
9a7e0b18
PZ
8430 return 0;
8431}
b40b2e8e 8432
9a7e0b18
PZ
8433struct rt_schedulable_data {
8434 struct task_group *tg;
8435 u64 rt_period;
8436 u64 rt_runtime;
8437};
b40b2e8e 8438
9a7e0b18
PZ
8439static int tg_schedulable(struct task_group *tg, void *data)
8440{
8441 struct rt_schedulable_data *d = data;
8442 struct task_group *child;
8443 unsigned long total, sum = 0;
8444 u64 period, runtime;
b40b2e8e 8445
9a7e0b18
PZ
8446 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8447 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 8448
9a7e0b18
PZ
8449 if (tg == d->tg) {
8450 period = d->rt_period;
8451 runtime = d->rt_runtime;
b40b2e8e 8452 }
b40b2e8e 8453
4653f803
PZ
8454 /*
8455 * Cannot have more runtime than the period.
8456 */
8457 if (runtime > period && runtime != RUNTIME_INF)
8458 return -EINVAL;
6f505b16 8459
4653f803
PZ
8460 /*
8461 * Ensure we don't starve existing RT tasks.
8462 */
9a7e0b18
PZ
8463 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8464 return -EBUSY;
6f505b16 8465
9a7e0b18 8466 total = to_ratio(period, runtime);
6f505b16 8467
4653f803
PZ
8468 /*
8469 * Nobody can have more than the global setting allows.
8470 */
8471 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8472 return -EINVAL;
6f505b16 8473
4653f803
PZ
8474 /*
8475 * The sum of our children's runtime should not exceed our own.
8476 */
9a7e0b18
PZ
8477 list_for_each_entry_rcu(child, &tg->children, siblings) {
8478 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8479 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 8480
9a7e0b18
PZ
8481 if (child == d->tg) {
8482 period = d->rt_period;
8483 runtime = d->rt_runtime;
8484 }
6f505b16 8485
9a7e0b18 8486 sum += to_ratio(period, runtime);
9f0c1e56 8487 }
6f505b16 8488
9a7e0b18
PZ
8489 if (sum > total)
8490 return -EINVAL;
8491
8492 return 0;
6f505b16
PZ
8493}
8494
9a7e0b18 8495static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 8496{
9a7e0b18
PZ
8497 struct rt_schedulable_data data = {
8498 .tg = tg,
8499 .rt_period = period,
8500 .rt_runtime = runtime,
8501 };
8502
8503 return walk_tg_tree(tg_schedulable, tg_nop, &data);
521f1a24
DG
8504}
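/*
 * Example of the constraint walked above (all values assumed for
 * illustration): a group with rt_runtime 500000us in a 1000000us period
 * contributes a ratio of 0.5.  Two children each asking for
 * 300000us/1000000us sum to 0.6 > 0.5, so tg_schedulable() returns -EINVAL
 * for the write that would create that situation.
 */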
8505
d0b27fa7
PZ
8506static int tg_set_bandwidth(struct task_group *tg,
8507 u64 rt_period, u64 rt_runtime)
6f505b16 8508{
ac086bc2 8509 int i, err = 0;
9f0c1e56 8510
9f0c1e56 8511 mutex_lock(&rt_constraints_mutex);
521f1a24 8512 read_lock(&tasklist_lock);
9a7e0b18
PZ
8513 err = __rt_schedulable(tg, rt_period, rt_runtime);
8514 if (err)
9f0c1e56 8515 goto unlock;
ac086bc2 8516
0986b11b 8517 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
8518 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8519 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
8520
8521 for_each_possible_cpu(i) {
8522 struct rt_rq *rt_rq = tg->rt_rq[i];
8523
0986b11b 8524 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 8525 rt_rq->rt_runtime = rt_runtime;
0986b11b 8526 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 8527 }
0986b11b 8528 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
9f0c1e56 8529 unlock:
521f1a24 8530 read_unlock(&tasklist_lock);
9f0c1e56
PZ
8531 mutex_unlock(&rt_constraints_mutex);
8532
8533 return err;
6f505b16
PZ
8534}
8535
d0b27fa7
PZ
8536int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8537{
8538 u64 rt_runtime, rt_period;
8539
8540 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8541 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8542 if (rt_runtime_us < 0)
8543 rt_runtime = RUNTIME_INF;
8544
8545 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8546}
8547
9f0c1e56
PZ
8548long sched_group_rt_runtime(struct task_group *tg)
8549{
8550 u64 rt_runtime_us;
8551
d0b27fa7 8552 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
8553 return -1;
8554
d0b27fa7 8555 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
8556 do_div(rt_runtime_us, NSEC_PER_USEC);
8557 return rt_runtime_us;
8558}
d0b27fa7
PZ
8559
8560int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8561{
8562 u64 rt_runtime, rt_period;
8563
8564 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8565 rt_runtime = tg->rt_bandwidth.rt_runtime;
8566
619b0488
R
8567 if (rt_period == 0)
8568 return -EINVAL;
8569
d0b27fa7
PZ
8570 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8571}
8572
8573long sched_group_rt_period(struct task_group *tg)
8574{
8575 u64 rt_period_us;
8576
8577 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8578 do_div(rt_period_us, NSEC_PER_USEC);
8579 return rt_period_us;
8580}
8581
8582static int sched_rt_global_constraints(void)
8583{
4653f803 8584 u64 runtime, period;
d0b27fa7
PZ
8585 int ret = 0;
8586
ec5d4989
HS
8587 if (sysctl_sched_rt_period <= 0)
8588 return -EINVAL;
8589
4653f803
PZ
8590 runtime = global_rt_runtime();
8591 period = global_rt_period();
8592
8593 /*
8594 * Sanity check on the sysctl variables.
8595 */
8596 if (runtime > period && runtime != RUNTIME_INF)
8597 return -EINVAL;
10b612f4 8598
d0b27fa7 8599 mutex_lock(&rt_constraints_mutex);
9a7e0b18 8600 read_lock(&tasklist_lock);
4653f803 8601 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 8602 read_unlock(&tasklist_lock);
d0b27fa7
PZ
8603 mutex_unlock(&rt_constraints_mutex);
8604
8605 return ret;
8606}
54e99124
DG
8607
8608int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8609{
8610 /* Don't accept realtime tasks when there is no way for them to run */
8611 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8612 return 0;
8613
8614 return 1;
8615}
8616
6d6bc0ad 8617#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
8618static int sched_rt_global_constraints(void)
8619{
ac086bc2
PZ
8620 unsigned long flags;
8621 int i;
8622
ec5d4989
HS
8623 if (sysctl_sched_rt_period <= 0)
8624 return -EINVAL;
8625
60aa605d
PZ
8626 /*
8627 * There's always some RT tasks in the root group
8628 * -- migration, kstopmachine etc..
8629 */
8630 if (sysctl_sched_rt_runtime == 0)
8631 return -EBUSY;
8632
0986b11b 8633 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
8634 for_each_possible_cpu(i) {
8635 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8636
0986b11b 8637 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 8638 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 8639 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 8640 }
0986b11b 8641 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 8642
d0b27fa7
PZ
8643 return 0;
8644}
6d6bc0ad 8645#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
8646
8647int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 8648 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
8649 loff_t *ppos)
8650{
8651 int ret;
8652 int old_period, old_runtime;
8653 static DEFINE_MUTEX(mutex);
8654
8655 mutex_lock(&mutex);
8656 old_period = sysctl_sched_rt_period;
8657 old_runtime = sysctl_sched_rt_runtime;
8658
8d65af78 8659 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
8660
8661 if (!ret && write) {
8662 ret = sched_rt_global_constraints();
8663 if (ret) {
8664 sysctl_sched_rt_period = old_period;
8665 sysctl_sched_rt_runtime = old_runtime;
8666 } else {
8667 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8668 def_rt_bandwidth.rt_period =
8669 ns_to_ktime(global_rt_period());
8670 }
8671 }
8672 mutex_unlock(&mutex);
8673
8674 return ret;
8675}
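/*
 * These sysctls are normally reached through procfs (names assumed from the
 * standard sysctl table layout), e.g.:
 *
 *	# echo 900000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * On a write, sched_rt_global_constraints() is re-checked; on failure the
 * old values are restored, otherwise def_rt_bandwidth is updated.
 */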
68318b8e 8676
052f1dc7 8677#ifdef CONFIG_CGROUP_SCHED
68318b8e
SV
8678
8679/* return corresponding task_group object of a cgroup */
2b01dfe3 8680static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
68318b8e 8681{
2b01dfe3
PM
8682 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
8683 struct task_group, css);
68318b8e
SV
8684}
8685
8686static struct cgroup_subsys_state *
2b01dfe3 8687cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 8688{
ec7dc8ac 8689 struct task_group *tg, *parent;
68318b8e 8690
2b01dfe3 8691 if (!cgrp->parent) {
68318b8e 8692 /* This is early initialization for the top cgroup */
68318b8e
SV
8693 return &init_task_group.css;
8694 }
8695
ec7dc8ac
DG
8696 parent = cgroup_tg(cgrp->parent);
8697 tg = sched_create_group(parent);
68318b8e
SV
8698 if (IS_ERR(tg))
8699 return ERR_PTR(-ENOMEM);
8700
68318b8e
SV
8701 return &tg->css;
8702}
8703
41a2d6cf
IM
8704static void
8705cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 8706{
2b01dfe3 8707 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
8708
8709 sched_destroy_group(tg);
8710}
8711
41a2d6cf 8712static int
be367d09 8713cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
68318b8e 8714{
b68aa230 8715#ifdef CONFIG_RT_GROUP_SCHED
54e99124 8716 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
b68aa230
PZ
8717 return -EINVAL;
8718#else
68318b8e
SV
8719 /* We don't support RT-tasks being in separate groups */
8720 if (tsk->sched_class != &fair_sched_class)
8721 return -EINVAL;
b68aa230 8722#endif
be367d09
BB
8723 return 0;
8724}
68318b8e 8725
be367d09
BB
8726static int
8727cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
8728 struct task_struct *tsk, bool threadgroup)
8729{
8730 int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
8731 if (retval)
8732 return retval;
8733 if (threadgroup) {
8734 struct task_struct *c;
8735 rcu_read_lock();
8736 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
8737 retval = cpu_cgroup_can_attach_task(cgrp, c);
8738 if (retval) {
8739 rcu_read_unlock();
8740 return retval;
8741 }
8742 }
8743 rcu_read_unlock();
8744 }
68318b8e
SV
8745 return 0;
8746}
8747
8748static void
2b01dfe3 8749cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
be367d09
BB
8750 struct cgroup *old_cont, struct task_struct *tsk,
8751 bool threadgroup)
68318b8e
SV
8752{
8753 sched_move_task(tsk);
be367d09
BB
8754 if (threadgroup) {
8755 struct task_struct *c;
8756 rcu_read_lock();
8757 list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
8758 sched_move_task(c);
8759 }
8760 rcu_read_unlock();
8761 }
68318b8e
SV
8762}
8763
052f1dc7 8764#ifdef CONFIG_FAIR_GROUP_SCHED
f4c753b7 8765static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
2b01dfe3 8766 u64 shareval)
68318b8e 8767{
2b01dfe3 8768 return sched_group_set_shares(cgroup_tg(cgrp), shareval);
68318b8e
SV
8769}
8770
f4c753b7 8771static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
68318b8e 8772{
2b01dfe3 8773 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
8774
8775 return (u64) tg->shares;
8776}
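
/*
 * Usage sketch for the two handlers above, which back the per-group
 * "cpu.shares" file.  Shares are a relative weight (the root group uses
 * the nice-0 weight of 1024), so doubling a sibling's shares roughly
 * doubles its share of CPU under contention.  Assuming the cpu
 * controller is mounted at /cgroup (mount point is only an example):
 *
 *   # cat /cgroup/grp/cpu.shares      -> 1024
 *   # echo 2048 > /cgroup/grp/cpu.shares
 *
 * sched_group_set_shares() clamps the value to [MIN_SHARES, MAX_SHARES]
 * before applying it to the group's per-cpu cfs_rq's.
 */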
6d6bc0ad 8777#endif /* CONFIG_FAIR_GROUP_SCHED */
68318b8e 8778
052f1dc7 8779#ifdef CONFIG_RT_GROUP_SCHED
0c70814c 8780static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
06ecb27c 8781 s64 val)
6f505b16 8782{
06ecb27c 8783 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
6f505b16
PZ
8784}
8785
06ecb27c 8786static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
6f505b16 8787{
06ecb27c 8788 return sched_group_rt_runtime(cgroup_tg(cgrp));
6f505b16 8789}
d0b27fa7
PZ
8790
8791static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
8792 u64 rt_period_us)
8793{
8794 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
8795}
8796
8797static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
8798{
8799 return sched_group_rt_period(cgroup_tg(cgrp));
8800}
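
/*
 * Usage sketch for the RT bandwidth files above.  "cpu.rt_runtime_us" is
 * signed so that -1 can express RUNTIME_INF (no limit); together with
 * "cpu.rt_period_us" it bounds how much of each period the group's RT
 * tasks may run.  Example values (group name and mount point are only
 * examples):
 *
 *   # echo 1000000 > /cgroup/grp/cpu.rt_period_us
 *   # echo 300000  > /cgroup/grp/cpu.rt_runtime_us
 *
 * i.e. at most 0.3s of RT execution per 1s period for "grp".  Writes
 * that would oversubscribe the parent's allocation are rejected by the
 * __rt_schedulable() walk earlier in this file.
 */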
6d6bc0ad 8801#endif /* CONFIG_RT_GROUP_SCHED */
6f505b16 8802
fe5c7cc2 8803static struct cftype cpu_files[] = {
052f1dc7 8804#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
8805 {
8806 .name = "shares",
f4c753b7
PM
8807 .read_u64 = cpu_shares_read_u64,
8808 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 8809 },
052f1dc7
PZ
8810#endif
8811#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8812 {
9f0c1e56 8813 .name = "rt_runtime_us",
06ecb27c
PM
8814 .read_s64 = cpu_rt_runtime_read,
8815 .write_s64 = cpu_rt_runtime_write,
6f505b16 8816 },
d0b27fa7
PZ
8817 {
8818 .name = "rt_period_us",
f4c753b7
PM
8819 .read_u64 = cpu_rt_period_read_uint,
8820 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 8821 },
052f1dc7 8822#endif
68318b8e
SV
8823};
8824
8825static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
8826{
fe5c7cc2 8827 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
68318b8e
SV
8828}
8829
8830struct cgroup_subsys cpu_cgroup_subsys = {
38605cae
IM
8831 .name = "cpu",
8832 .create = cpu_cgroup_create,
8833 .destroy = cpu_cgroup_destroy,
8834 .can_attach = cpu_cgroup_can_attach,
8835 .attach = cpu_cgroup_attach,
8836 .populate = cpu_cgroup_populate,
8837 .subsys_id = cpu_cgroup_subsys_id,
68318b8e
SV
8838 .early_init = 1,
8839};
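
/*
 * Usage sketch: registering this subsystem is what exposes the "cpu"
 * controller, with .populate adding the cpu.* files defined above to
 * every group directory.  Assuming an example mount point of /cgroup:
 *
 *   # mount -t cgroup -o cpu none /cgroup
 *   # mkdir /cgroup/grp              -> cpu_cgroup_create()
 *   # ls /cgroup/grp                 -> cpu.shares, cpu.rt_runtime_us, ...
 *   # echo $$ > /cgroup/grp/tasks    -> can_attach() + attach()
 */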
8840
052f1dc7 8841#endif /* CONFIG_CGROUP_SCHED */
d842de87
SV
8842
8843#ifdef CONFIG_CGROUP_CPUACCT
8844
8845/*
8846 * CPU accounting code for task groups.
8847 *
8848 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
8849 * (balbir@in.ibm.com).
8850 */
8851
934352f2 8852/* track cpu usage of a group of tasks and its child groups */
d842de87
SV
8853struct cpuacct {
8854 struct cgroup_subsys_state css;
8855 /* cpuusage holds pointer to a u64-type object on every cpu */
43cf38eb 8856 u64 __percpu *cpuusage;
ef12fefa 8857 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
934352f2 8858 struct cpuacct *parent;
d842de87
SV
8859};
8860
8861struct cgroup_subsys cpuacct_subsys;
8862
8863/* return cpu accounting group corresponding to this container */
32cd756a 8864static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
d842de87 8865{
32cd756a 8866 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
d842de87
SV
8867 struct cpuacct, css);
8868}
8869
8870/* return cpu accounting group to which this task belongs */
8871static inline struct cpuacct *task_ca(struct task_struct *tsk)
8872{
8873 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
8874 struct cpuacct, css);
8875}
8876
8877/* create a new cpu accounting group */
8878static struct cgroup_subsys_state *cpuacct_create(
32cd756a 8879 struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87
SV
8880{
8881 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
ef12fefa 8882 int i;
d842de87
SV
8883
8884 if (!ca)
ef12fefa 8885 goto out;
d842de87
SV
8886
8887 ca->cpuusage = alloc_percpu(u64);
ef12fefa
BR
8888 if (!ca->cpuusage)
8889 goto out_free_ca;
8890
8891 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
8892 if (percpu_counter_init(&ca->cpustat[i], 0))
8893 goto out_free_counters;
d842de87 8894
934352f2
BR
8895 if (cgrp->parent)
8896 ca->parent = cgroup_ca(cgrp->parent);
8897
d842de87 8898 return &ca->css;
ef12fefa
BR
8899
8900out_free_counters:
8901 while (--i >= 0)
8902 percpu_counter_destroy(&ca->cpustat[i]);
8903 free_percpu(ca->cpuusage);
8904out_free_ca:
8905 kfree(ca);
8906out:
8907 return ERR_PTR(-ENOMEM);
d842de87
SV
8908}
8909
8910/* destroy an existing cpu accounting group */
41a2d6cf 8911static void
32cd756a 8912cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 8913{
32cd756a 8914 struct cpuacct *ca = cgroup_ca(cgrp);
ef12fefa 8915 int i;
d842de87 8916
ef12fefa
BR
8917 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
8918 percpu_counter_destroy(&ca->cpustat[i]);
d842de87
SV
8919 free_percpu(ca->cpuusage);
8920 kfree(ca);
8921}
8922
720f5498
KC
8923static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
8924{
b36128c8 8925 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
8926 u64 data;
8927
8928#ifndef CONFIG_64BIT
8929 /*
8930 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
8931 */
05fa785c 8932 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 8933 data = *cpuusage;
05fa785c 8934 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
8935#else
8936 data = *cpuusage;
8937#endif
8938
8939 return data;
8940}
8941
8942static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
8943{
b36128c8 8944 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
8945
8946#ifndef CONFIG_64BIT
8947 /*
8948 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
8949 */
05fa785c 8950 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 8951 *cpuusage = val;
05fa785c 8952 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
8953#else
8954 *cpuusage = val;
8955#endif
8956}
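
/*
 * Why the rq->lock in the two helpers above matters: on a 32-bit machine
 * a u64 access is two 32-bit operations, so an unlocked reader racing
 * with a writer could see a torn value made of one old and one new half,
 * e.g. (purely illustrative numbers):
 *
 *   old value : 0x00000000ffffffff
 *   new value : 0x0000000100000000
 *   torn read : 0x0000000000000000 or 0x00000001ffffffff
 *
 * On 64-bit the access is a single aligned load/store, so the lock is
 * not needed for the value to be read or written atomically.
 */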
8957
d842de87 8958/* return total cpu usage (in nanoseconds) of a group */
32cd756a 8959static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
d842de87 8960{
32cd756a 8961 struct cpuacct *ca = cgroup_ca(cgrp);
d842de87
SV
8962 u64 totalcpuusage = 0;
8963 int i;
8964
720f5498
KC
8965 for_each_present_cpu(i)
8966 totalcpuusage += cpuacct_cpuusage_read(ca, i);
d842de87
SV
8967
8968 return totalcpuusage;
8969}
8970
0297b803
DG
8971static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
8972 u64 reset)
8973{
8974 struct cpuacct *ca = cgroup_ca(cgrp);
8975 int err = 0;
8976 int i;
8977
8978 if (reset) {
8979 err = -EINVAL;
8980 goto out;
8981 }
8982
720f5498
KC
8983 for_each_present_cpu(i)
8984 cpuacct_cpuusage_write(ca, i, 0);
0297b803 8985
0297b803
DG
8986out:
8987 return err;
8988}
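
/*
 * Usage sketch for the write handler above: the only supported write is
 * a reset to zero, anything else fails with -EINVAL.  Assuming the
 * cpuacct controller is mounted at /cgroup (example path):
 *
 *   # cat /cgroup/grp/cpuacct.usage        -> 123456789  (nanoseconds)
 *   # echo 0 > /cgroup/grp/cpuacct.usage
 *   # echo 1 > /cgroup/grp/cpuacct.usage   -> -EINVAL
 */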
8989
e9515c3c
KC
8990static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
8991 struct seq_file *m)
8992{
8993 struct cpuacct *ca = cgroup_ca(cgroup);
8994 u64 percpu;
8995 int i;
8996
8997 for_each_present_cpu(i) {
8998 percpu = cpuacct_cpuusage_read(ca, i);
8999 seq_printf(m, "%llu ", (unsigned long long) percpu);
9000 }
9001 seq_printf(m, "\n");
9002 return 0;
9003}
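
/*
 * Example output of the seq handler above ("cpuacct.usage_percpu"): one
 * cumulative nanosecond count per present CPU on a single line, e.g. on
 * a 4-CPU machine (numbers are illustrative):
 *
 *   # cat /cgroup/grp/cpuacct.usage_percpu
 *   120000000 98000000 0 4500000
 */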
9004
ef12fefa
BR
9005static const char *cpuacct_stat_desc[] = {
9006 [CPUACCT_STAT_USER] = "user",
9007 [CPUACCT_STAT_SYSTEM] = "system",
9008};
9009
9010static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9011 struct cgroup_map_cb *cb)
9012{
9013 struct cpuacct *ca = cgroup_ca(cgrp);
9014 int i;
9015
9016 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
9017 s64 val = percpu_counter_read(&ca->cpustat[i]);
9018 val = cputime64_to_clock_t(val);
9019 cb->fill(cb, cpuacct_stat_desc[i], val);
9020 }
9021 return 0;
9022}
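
/*
 * Example output of the map handler above ("cpuacct.stat"): the user and
 * system counters are converted with cputime64_to_clock_t(), so they are
 * reported in USER_HZ ticks rather than nanoseconds (values are
 * illustrative):
 *
 *   # cat /cgroup/grp/cpuacct.stat
 *   user 4096
 *   system 713
 */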
9023
d842de87
SV
9024static struct cftype files[] = {
9025 {
9026 .name = "usage",
f4c753b7
PM
9027 .read_u64 = cpuusage_read,
9028 .write_u64 = cpuusage_write,
d842de87 9029 },
e9515c3c
KC
9030 {
9031 .name = "usage_percpu",
9032 .read_seq_string = cpuacct_percpu_seq_read,
9033 },
ef12fefa
BR
9034 {
9035 .name = "stat",
9036 .read_map = cpuacct_stats_show,
9037 },
d842de87
SV
9038};
9039
32cd756a 9040static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 9041{
32cd756a 9042 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
d842de87
SV
9043}
9044
9045/*
9046 * charge this task's execution time to its accounting group.
9047 *
9048 * called with rq->lock held.
9049 */
9050static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9051{
9052 struct cpuacct *ca;
934352f2 9053 int cpu;
d842de87 9054
c40c6f85 9055 if (unlikely(!cpuacct_subsys.active))
d842de87
SV
9056 return;
9057
934352f2 9058 cpu = task_cpu(tsk);
a18b83b7
BR
9059
9060 rcu_read_lock();
9061
d842de87 9062 ca = task_ca(tsk);
d842de87 9063
934352f2 9064 for (; ca; ca = ca->parent) {
b36128c8 9065 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
d842de87
SV
9066 *cpuusage += cputime;
9067 }
a18b83b7
BR
9068
9069 rcu_read_unlock();
d842de87
SV
9070}
9071
fa535a77
AB
9072/*
9073 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled, one jiffy can be very large
9074 * in cputime_t units. As a result, cpuacct_update_stats() calls
9075 * percpu_counter_add() with values large enough to always overflow the
9076 * per-CPU batch limit, causing bad SMP scalability.
9077 *
9078 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
9079 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
9080 * and enabled. We cap it at INT_MAX, which is the largest allowed batch value.
9081 */
9082#ifdef CONFIG_SMP
9083#define CPUACCT_BATCH \
9084 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9085#else
9086#define CPUACCT_BATCH 0
9087#endif
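
/*
 * Worked example of the scaling above (numbers are illustrative): with
 * CONFIG_VIRT_CPU_ACCOUNTING a jiffy may be worth millions of cputime
 * units, say cputime_one_jiffy == 10,000,000 with a percpu_counter_batch
 * of 32:
 *
 *   32 * 10,000,000 = 320,000,000   (< INT_MAX, so no capping)
 *
 * whereas without virtual accounting cputime_one_jiffy is 1 and the
 * batch stays at 32.  The min_t() against INT_MAX only matters when the
 * product would overflow an int.
 */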
9088
ef12fefa
BR
9089/*
9090 * Charge the system/user time to the task's accounting group.
9091 */
9092static void cpuacct_update_stats(struct task_struct *tsk,
9093 enum cpuacct_stat_index idx, cputime_t val)
9094{
9095 struct cpuacct *ca;
fa535a77 9096 int batch = CPUACCT_BATCH;
ef12fefa
BR
9097
9098 if (unlikely(!cpuacct_subsys.active))
9099 return;
9100
9101 rcu_read_lock();
9102 ca = task_ca(tsk);
9103
9104 do {
fa535a77 9105 __percpu_counter_add(&ca->cpustat[idx], val, batch);
ef12fefa
BR
9106 ca = ca->parent;
9107 } while (ca);
9108 rcu_read_unlock();
9109}
9110
d842de87
SV
9111struct cgroup_subsys cpuacct_subsys = {
9112 .name = "cpuacct",
9113 .create = cpuacct_create,
9114 .destroy = cpuacct_destroy,
9115 .populate = cpuacct_populate,
9116 .subsys_id = cpuacct_subsys_id,
9117};
9118#endif /* CONFIG_CGROUP_CPUACCT */
03b042bf
PM
9119
9120#ifndef CONFIG_SMP
9121
03b042bf
PM
9122void synchronize_sched_expedited(void)
9123{
fc390cde 9124 barrier();
03b042bf
PM
9125}
9126EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
9127
9128#else /* #ifndef CONFIG_SMP */
9129
cc631fb7 9130static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
03b042bf 9131
cc631fb7 9132static int synchronize_sched_expedited_cpu_stop(void *data)
03b042bf 9133{
969c7921
TH
9134 /*
9135 * There must be a full memory barrier on each affected CPU
9136 * between the time that try_stop_cpus() is called and the
9137 * time that it returns.
9138 *
9139 * In the current initial implementation of cpu_stop, the
9140 * above condition is already met when control reaches
9141 * this point and the following smp_mb() is not strictly
9142 * necessary. Do smp_mb() anyway for documentation and
9143 * robustness against future implementation changes.
9144 */
cc631fb7 9145 smp_mb(); /* See above comment block. */
969c7921 9146 return 0;
03b042bf 9147}
03b042bf
PM
9148
9149/*
9150 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
9151 * approach to force the grace period to end quickly. This consumes
9152 * significant time on all CPUs, and is thus not recommended for
9153 * any sort of common-case code.
9154 *
9155 * Note that it is illegal to call this function while holding any
9156 * lock that is acquired by a CPU-hotplug notifier. Failing to
9157 * observe this restriction will result in deadlock.
9158 */
9159void synchronize_sched_expedited(void)
9160{
969c7921 9161 int snap, trycount = 0;
03b042bf
PM
9162
9163 smp_mb(); /* ensure prior mod happens before capturing snap. */
969c7921 9164 snap = atomic_read(&synchronize_sched_expedited_count) + 1;
03b042bf 9165 get_online_cpus();
969c7921
TH
9166 while (try_stop_cpus(cpu_online_mask,
9167 synchronize_sched_expedited_cpu_stop,
94458d5e 9168 NULL) == -EAGAIN) {
03b042bf
PM
9169 put_online_cpus();
9170 if (trycount++ < 10)
9171 udelay(trycount * num_online_cpus());
9172 else {
9173 synchronize_sched();
9174 return;
9175 }
969c7921 9176 if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
03b042bf
PM
9177 smp_mb(); /* ensure test happens before caller kfree */
9178 return;
9179 }
9180 get_online_cpus();
9181 }
969c7921 9182 atomic_inc(&synchronize_sched_expedited_count);
cc631fb7 9183 smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
03b042bf 9184 put_online_cpus();
03b042bf
PM
9185}
9186EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
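
/*
 * Note on the retry logic above: synchronize_sched_expedited_count acts
 * as a generation counter.  Each caller snapshots count+1 before trying
 * to stop the CPUs; if try_stop_cpus() keeps returning -EAGAIN but the
 * counter has meanwhile advanced past the snapshot, another caller
 * completed an expedited grace period after our snapshot was taken,
 * which also satisfies this caller, so we return without stopping the
 * CPUs ourselves.  After roughly ten failed attempts the code gives up
 * and falls back to the normal synchronize_sched().
 */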
9187
9188#endif /* #else #ifndef CONFIG_SMP */