#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

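/*
 * Per-CPU cputime accumulators, one cputime64_t counter per accounting
 * category; these are the counters that back the per-cpu "cpu" lines of
 * /proc/stat (user, nice, system, idle, iowait, irq, softirq, steal,
 * guest, guest_nice).
 */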
struct cpu_usage_stat {
        cputime64_t user;
        cputime64_t nice;
        cputime64_t system;
        cputime64_t softirq;
        cputime64_t irq;
        cputime64_t idle;
        cputime64_t iowait;
        cputime64_t steal;
        cputime64_t guest;
        cputime64_t guest_nice;
};

struct kernel_stat {
        struct cpu_usage_stat cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
        unsigned int irqs[NR_IRQS];
#endif
        unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

#define kstat_cpu(cpu) per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu __get_cpu_var(kstat)
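/*
 * Illustrative usage (a sketch, not part of this header): readers such as
 * fs/proc/stat.c pull another CPU's counters through kstat_cpu(), e.g.
 *
 *	cputime64_t idle = kstat_cpu(cpu).cpustat.idle;
 *
 * kstat_this_cpu dereferences this CPU's per-cpu data, so callers are
 * expected to have preemption disabled (see the comment above).
 */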

extern unsigned long long nr_context_switches(void);

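/*
 * Two layouts for the per-IRQ counters: without CONFIG_GENERIC_HARDIRQS
 * they live in the irqs[] array of each CPU's kernel_stat (see above);
 * with it they hang off the per-IRQ irq_desc, so the helpers below take
 * or look up the descriptor instead.
 */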
#ifndef CONFIG_GENERIC_HARDIRQS
#define kstat_irqs_this_cpu(irq) \
        (kstat_this_cpu.irqs[irq])

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
                                            struct irq_desc *desc)
{
        kstat_this_cpu.irqs[irq]++;
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
#define kstat_irqs_this_cpu(DESC) \
        ((DESC)->kstat_irqs[smp_processor_id()])
#define kstat_incr_irqs_this_cpu(irqno, DESC) \
        ((DESC)->kstat_irqs[smp_processor_id()]++)

#endif

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
        kstat_this_cpu.softirqs[irq]++;
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
        return kstat_cpu(cpu).softirqs[irq];
}
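
/*
 * Illustrative use (a sketch mirroring what fs/proc/softirqs.c does):
 * summing one softirq's count over all CPUs, e.g. for TIMER_SOFTIRQ:
 *
 *	unsigned int cpu, total = 0;
 *
 *	for_each_possible_cpu(cpu)
 *		total += kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu);
 */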

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
static inline unsigned int kstat_irqs(unsigned int irq)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += kstat_irqs_cpu(irq, cpu);

        return sum;
}
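
/*
 * Illustrative use (a sketch): the per-line total since boot, as a
 * /proc/interrupts style summary might print it:
 *
 *	seq_printf(p, "%10u", kstat_irqs(irq));
 *
 * Note that this walks every possible CPU, so it is not meant for hot paths.
 */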


/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

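/*
 * CPU time accounting hooks, implemented by the scheduler's cputime
 * accounting code: the *_time() variants charge an amount of cputime to the
 * current task and to the matching cpustat counter, while the *_tick() /
 * *_ticks() variants are the periodic-tick entry points that convert ticks
 * to cputime first.
 */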
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */