#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx)	xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx)	xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
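
/*
 * Editorial illustration (not in the original source): with all of the
 * zone configs above enabled, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE
 *
 * one item per zone, in the same order as the zone enum. The
 * __count_zone_vm_events() macro below relies on this ordering to index
 * the per-zone items with simple zone arithmetic.
 */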

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
#endif
		NR_VM_EVENT_ITEMS
};

extern const struct seq_operations fragmentation_op;
extern const struct seq_operations pagetypeinfo_op;
extern const struct seq_operations zoneinfo_op;
extern const struct seq_operations vmstat_op;
extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
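
/*
 * Usage sketch (editorial illustration, not part of the original file):
 *
 *	count_vm_event(PGFAULT);		- from preemptible context
 *	__count_vm_event(PGMAJFAULT);		- caller already holds off
 *						  preemption or interrupts
 *	__count_vm_events(PGFREE, 1 << order);	- batched delta update
 *
 * The __ variants skip the implicit get_cpu()/put_cpu() pair, so they
 * are only safe where the caller cannot migrate to another CPU
 * mid-update.
 */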

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)

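/*
 * Editorial example: with the zone configs enabled, the per-zone event
 * items generated by FOR_ALL_ZONES() mirror the zone enum, so for a
 * zone with zone_idx(zone) == ZONE_HIGHMEM,
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1)
 *
 * evaluates to __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL +
 * ZONE_HIGHMEM, 1), i.e. it increments PGALLOC_HIGH.
 */
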
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

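/*
 * Editorial note: each update above is applied twice, to the zone's own
 * counter and to the global vm_stat[] array, so global readouts need no
 * summation over zones at read time.
 */
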
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

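/*
 * Editorial note on the clamp above: under SMP the authoritative counts
 * include per-cpu differentials that are folded into these atomics only
 * periodically, so a reader may transiently observe a negative total.
 * Clamping to 0 is the cheap, approximately correct answer.
 */
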
extern unsigned long global_lru_pages(void);

static inline unsigned long zone_lru_pages(struct zone *zone)
{
	return (zone_page_state(zone, NR_ACTIVE_ANON)
		+ zone_page_state(zone, NR_ACTIVE_FILE)
		+ zone_page_state(zone, NR_INACTIVE_ANON)
		+ zone_page_state(zone, NR_INACTIVE_FILE));
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
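
/*
 * Editorial note: a typical call is
 * node_page_state(numa_node_id(), NR_FREE_PAGES). The sum is unrolled
 * with #ifdefs rather than looped so the compiler can emit
 * straight-line loads and adds for exactly the zones present in this
 * configuration.
 */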

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

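/*
 * Editorial illustration of the wrappers above:
 *
 *	__add_zone_page_state(zone, NR_FILE_PAGES, nr);
 *	__sub_zone_page_state(zone, NR_FILE_PAGES, nr);
 *
 * Both simply route a signed delta into __mod_zone_page_state().
 */
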
extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
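
/*
 * Editorial note: on UP the interrupt-unsafe and interrupt-safe entry
 * points are therefore the same code; inc_zone_page_state(page, item)
 * compiles down to the pair of atomic_long_inc() calls in
 * __inc_zone_state().
 */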

static inline void refresh_cpu_vm_stats(int cpu) { }

#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */