#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per cpu counter implementation.
 *
 * Counters should only be incremented, and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);
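
/*
 * Usage sketch (illustrative; not part of the original header):
 * count_vm_event() is safe in preemptible context because
 * get_cpu_var()/put_cpu() disable and re-enable preemption around the
 * increment; __count_vm_event() may only be used when preemption is
 * already disabled. A fault path might report events like this, where
 * "major" is a hypothetical flag set by the caller:
 *
 *	count_vm_event(PGFAULT);
 *	if (major)
 *		count_vm_event(PGMAJFAULT);
 */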

#else

/* Disable counters */
#define get_cpu_vm_events(e)	0L
#define count_vm_event(e)	do { } while (0)
#define count_vm_events(e, d)	do { } while (0)
#define __count_vm_event(e)	do { } while (0)
#define __count_vm_events(e, d)	do { } while (0)
#define vm_events_fold_cpu(x)	do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_DMA + zone_idx(zone), delta)
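
/*
 * Illustrative expansion (not part of the original header): because
 * FOR_ALL_ZONES() emits the _DMA, _DMA32, _NORMAL and _HIGH items in
 * zone order, an event for a specific zone can be counted by indexing
 * from the _DMA item. For example,
 *
 *	__count_zone_vm_events(PGREFILL, zone, 1);
 *
 * expands to
 *
 *	__count_vm_events(PGREFILL_DMA + zone_idx(zone), 1);
 */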

/*
 * Zone-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

/* Fold a delta into both the per zone and the global counter. */
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
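
/*
 * Note (editorial, not in the original header): on SMP the counters
 * are only approximate, because each CPU accumulates updates in per
 * cpu differentials that are folded into the atomic totals later.
 * A reader can therefore see a transiently negative sum, which is why
 * the functions above clamp the result to zero. A hypothetical caller
 * reporting a system-wide total for some item from enum zone_stat_item
 * (see <linux/mmzone.h>) would simply do:
 *
 *	unsigned long nr = global_page_state(item);
 */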

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently on a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
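
/*
 * Editorial note: zones that are not populated on a node contribute
 * zero, because their counters are never incremented, so the sum above
 * is also correct for nodes without e.g. highmem. A hypothetical use:
 *
 *	unsigned long nr = node_page_state(numa_node_id(), item);
 */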

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);
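
/*
 * Usage sketch (illustrative, not part of the original header): the
 * double-underscore variants assume the caller already runs in a
 * context where the per cpu differential cannot be corrupted (e.g.
 * with interrupts disabled); the plain variants protect themselves.
 * With a hypothetical zone, item and order:
 *
 *	__mod_zone_page_state(zone, item, -(1 << order));
 */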

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */