#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented.  VM_EVENT_COUNTERS can only be
 * disabled when CONFIG_EMBEDDED is set; things like procps (vmstat,
 * top, etc) use /proc/vmstat and depend on these counters.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) xx##_DMA, DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
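/*
 * For illustration: with both CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM enabled,
 * FOR_ALL_ZONES(PGALLOC) expands to
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH
 *
 * one item per configured zone type, in zone order, so the per-zone events
 * stay contiguous in the enum below.
 */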

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/* Caller must already have preemption disabled (or be in irq context). */
static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

/* Preemption-safe: get_cpu_var()/put_cpu() pin the current CPU. */
static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
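
/*
 * Illustrative usage (not part of this header): a fault path would
 * account the event with, e.g.
 *
 *	count_vm_event(PGFAULT);
 *
 * and the aggregated totals then show up in /proc/vmstat.
 */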

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else /* !CONFIG_VM_EVENT_COUNTERS */

/* Disable counters */
#define get_cpu_vm_events(e)	0L
#define count_vm_event(e)	do { } while (0)
#define count_vm_events(e, d)	do { } while (0)
#define __count_vm_event(e)	do { } while (0)
#define __count_vm_events(e, d)	do { } while (0)
#define vm_events_fold_cpu(x)	do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_DMA + zone_idx(zone), delta)
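
/*
 * For illustration: because FOR_ALL_ZONES() lays the per-zone items out
 * contiguously and in zone order, item##_DMA + zone_idx(zone) selects the
 * item matching "zone".  The page allocator could thus account an order-N
 * allocation with something like
 *
 *	__count_zone_vm_events(PGALLOC, zone, 1UL << order);
 *
 * which expands to __count_vm_events(PGALLOC_DMA + zone_idx(zone), ...).
 */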

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
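
/*
 * Under SMP the per-CPU differentials are only folded back into the
 * atomic counters periodically, so a reader can race with pending
 * updates and see a transiently negative sum.  global_page_state()
 * above and zone_page_state() below therefore clamp negative values
 * to zero rather than return a huge unsigned number.
 */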

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
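
/*
 * Illustrative usage (not part of this header): a NUMA-aware caller
 * could sample the page cache footprint of the local node with, e.g.
 *
 *	unsigned long pages = node_page_state(numa_node_id(), NR_FILE_PAGES);
 *
 * where NR_FILE_PAGES is one of the zone_stat_item values from
 * <linux/mmzone.h>.
 */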

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */