#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 14

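/*
 * Layout of /proc/schedstat as produced by show_schedstat() below:
 *	version <SCHEDSTAT_VERSION>
 *	timestamp <jiffies>
 *	cpu<N> ...	one line of runqueue counters per online cpu
 *	domain<N> <cpumask> ...	one line per sched domain of that cpu (SMP only)
 */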
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_both_empty,
		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_sched_info.cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len, &sd->span);
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
			    " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	kfree(mask_str);
	return 0;
}

static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
module_init(proc_schedstat_init);

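/*
 * Userspace consumers are expected to sample /proc/schedstat periodically
 * and work with deltas between reads; the "version" line lets them adapt
 * to (or bail out on) format changes, per SCHEDSTAT_VERSION above.
 */
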
/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
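/*
 * Illustrative use from the scheduler proper (the hooks live in sched.c and
 * the scheduling classes), e.g.:
 *	schedstat_inc(rq, ttwu_count);
 *	schedstat_add(sd, lb_imbalance[itype], imbalance);
 * All three macros compile away when CONFIG_SCHEDSTATS is not set (see the
 * stubs below).
 */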
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

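/*
 * The sched_info bookkeeping below feeds both schedstats and per-task delay
 * accounting, which is why it is built whenever either CONFIG_SCHEDSTATS or
 * CONFIG_TASK_DELAY_ACCT is enabled.
 */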
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * Called when a process is dequeued from the active array and given
 * the cpu. We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue. (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * Though we are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * Called when a process is queued into either the active or expired
 * array. The time is noted and later used to determine how long the
 * task had to wait to reach the cpu. Since the expired queue will
 * become the active queue after the active queue is empty, without
 * dequeuing and requeuing any tasks, we are interested in queuing to
 * either. It is unusual but not impossible for tasks to be dequeued
 * and immediately requeued in the same or another array: this can
 * happen in sched_yield(), set_user_nice(), and even load_balance()
 * as it moves tasks from runqueue to runqueue.
 *
 * This function is only called from enqueue_task(), and only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily. Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	t->sched_info.cpu_time += delta;
	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu. It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
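/*
 * sched_info_switch() is the hook the core scheduler uses when switching
 * between two distinct tasks; at the time of writing the call site is
 * schedule(), just before context_switch(). The idle task is filtered out
 * in __sched_info_switch() above.
 */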
#else
#define sched_info_queued(t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

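/*
 * The three account_group_*() helpers below share one pattern: check that
 * the task's signal struct is still safe to use, then add the sample to
 * this CPU's slot of the per-cpu thread_group_cputime totals, with
 * get_cpu()/put_cpu_no_resched() pinning the CPU around the per_cpu_ptr()
 * access. Typical callers (at the time of writing) are the tick accounting
 * paths, e.g. account_user_time(), account_system_time() and the
 * scheduling classes' update_curr().
 */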
/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct signal_struct *sig;

	/* tsk == current, ensure it is safe to use ->signal */
	if (unlikely(tsk->exit_state))
		return;

	sig = tsk->signal;
	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->utime = cputime_add(times->utime, cputime);
		put_cpu_no_resched();
	}
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct signal_struct *sig;

	/* tsk == current, ensure it is safe to use ->signal */
	if (unlikely(tsk->exit_state))
		return;

	sig = tsk->signal;
	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->stime = cputime_add(times->stime, cputime);
		put_cpu_no_resched();
	}
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct signal_struct *sig;

	sig = tsk->signal;
	/* see __exit_signal()->task_rq_unlock_wait() */
	barrier();
	if (unlikely(!sig))
		return;

	if (sig->cputime.totals) {
		struct task_cputime *times;

		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
		times->sum_exec_runtime += ns;
		put_cpu_no_resched();
	}
}