include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per cpu counter implementation.
 *
 * Counters should only be incremented. VM_EVENT_COUNTERS can only be
 * switched off when CONFIG_EMBEDDED is set. Tools like procps (vmstat,
 * top, etc.) read /proc/vmstat and depend on these counters.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
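
/*
 * For illustration: with both CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM
 * enabled,
 *
 *        FOR_ALL_ZONES(PGALLOC)
 *
 * expands to
 *
 *        PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL , PGALLOC_HIGH
 *
 * giving the enum below one event counter per zone type, in zone order.
 */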

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
        FOR_ALL_ZONES(PGALLOC),
        PGFREE, PGACTIVATE, PGDEACTIVATE,
        PGFAULT, PGMAJFAULT,
        FOR_ALL_ZONES(PGREFILL),
        FOR_ALL_ZONES(PGSTEAL),
        FOR_ALL_ZONES(PGSCAN_KSWAPD),
        FOR_ALL_ZONES(PGSCAN_DIRECT),
        PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
        PAGEOUTRUN, ALLOCSTALL, PGROTATED,
        NR_VM_EVENT_ITEMS
};
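
/*
 * Each event above is exported through /proc/vmstat under its
 * lower-case name ("pgpgin", "pgpgout", "pswpin", ...), which is
 * what the procps tools parse.
 */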

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
        get_cpu_var(vm_event_states).event[item]++;
        put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        get_cpu_var(vm_event_states).event[item] += delta;
        put_cpu();
}

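/*
 * Usage sketch (illustrative only; the callers below are hypothetical):
 * code that may be preempted uses count_vm_event(), which brackets the
 * per cpu access with get_cpu()/put_cpu(); code already running with
 * preemption disabled can use the cheaper __ variants.
 */
#if 0
static void example_charge_major_fault(void)
{
        count_vm_event(PGMAJFAULT);        /* preempt-safe variant */
}

static void example_charge_freed_pages(unsigned long nr)
{
        /* caller must have preemption disabled for the __ variant */
        __count_vm_events(PGFREE, nr);
}
#endif
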
extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
#define get_cpu_vm_events(e)        0L
#define count_vm_event(e)        do { } while (0)
#define count_vm_events(e, d)        do { } while (0)
#define __count_vm_event(e)        do { } while (0)
#define __count_vm_events(e, d)        do { } while (0)
#define vm_events_fold_cpu(x)        do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
                __count_vm_events(item##_DMA + zone_idx(zone), delta)
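
/*
 * For example, __count_zone_vm_events(PGALLOC, zone, 1 << order) expands
 * to __count_vm_events(PGALLOC_DMA + zone_idx(zone), 1 << order); this
 * relies on the FOR_ALL_ZONES() ordering of the per zone event items
 * matching the zone index order.
 */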

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                        enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

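/*
 * With CONFIG_SMP the per cpu differentials may not have been folded
 * back into the atomic counters yet, so a reader can observe a
 * transiently negative sum; the accessors below clamp such values to 0.
 */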
static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                        enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_DMA], item);
}

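/*
 * Usage sketch (illustrative only): summing one item across the zones
 * of a node, e.g. the page cache pages on node 0. NR_FILE_PAGES is one
 * of the zone_stat_item values defined in <linux/mmzone.h>.
 */
#if 0
unsigned long node0_file_pages = node_page_state(0, NR_FILE_PAGES);
#endif
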
extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d) \
                __mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
                __mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

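/*
 * Note on the split: the __ variants above leave protection of the per
 * cpu differentials to the caller (typically interrupts are already
 * disabled, e.g. under zone->lock), while mod/inc/dec_zone_page_state
 * disable interrupts themselves around the update.
 */
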
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_dec(&page_zone(page)->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */