#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
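
/*
 * For illustration: with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and
 * CONFIG_HIGHMEM all enabled, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * so each configured zone type gets its own enumerator; zones compiled
 * out drop their enumerator as well, keeping the two orderings in sync.
 */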

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
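
/*
 * Illustrative usage (not part of this header): from preemptible context
 * use the non-underscored helpers, which pin the cpu themselves, e.g.
 *
 *	count_vm_event(PGFAULT);
 *
 * whereas __count_vm_events(PGPGOUT, nr) may be used where preemption is
 * already disabled and pinning the cpu again would be redundant.
 */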

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
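
/*
 * The item##_NORMAL - ZONE_NORMAL + zone_idx(zone) arithmetic works
 * because the FOR_ALL_ZONES() items above are declared in the same order
 * as the zone types in enum zone_type. For example, for a highmem zone
 * (assuming CONFIG_HIGHMEM), __count_zone_vm_events(PGALLOC, zone, 1)
 * resolves to __count_vm_events(PGALLOC_HIGH, 1).
 */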

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
				enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
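
/*
 * The clamp to zero under CONFIG_SMP exists because per-cpu differentials
 * that have not yet been folded back into vm_stat can transiently drive
 * the approximate sum negative. An illustrative read such as
 *
 *	unsigned long dirty = global_page_state(NR_FILE_DIRTY);
 *
 * is therefore guaranteed a non-negative, if slightly stale, value.
 */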

extern unsigned long global_lru_pages(void);

static inline unsigned long zone_lru_pages(struct zone *zone)
{
	return (zone_page_state(zone, NR_ACTIVE_ANON)
		+ zone_page_state(zone, NR_ACTIVE_FILE)
		+ zone_page_state(zone, NR_INACTIVE_ANON)
		+ zone_page_state(zone, NR_INACTIVE_FILE));
}

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
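
/*
 * Illustrative use: summing one item over every zone of the local node,
 * e.g.
 *
 *	unsigned long nr_file = node_page_state(numa_node_id(), NR_FILE_PAGES);
 *
 * costs only a handful of atomic_long_read()s, one per configured zone.
 */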

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
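
/*
 * By convention (spelled out with the definitions in mm/vmstat.c), the
 * underscored variants update the per-cpu differentials without any
 * protection of their own, so callers are expected to run with
 * interrupts disabled; the plain variants disable interrupts themselves.
 */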

void refresh_cpu_vm_stats(int);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single-processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */