[PATCH] zoned vm counters: conversion of nr_writeback to per zone counter
include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

/*
 * Global page accounting. One instance per CPU. Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (which protects the instance from modification by
 *   interrupt).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context. In this case, the field should be
 *   commented here.
 */
struct page_state {
	unsigned long nr_unstable;	/* NFS unstable pages */
#define GET_PAGE_STATE_LAST nr_unstable

	/*
	 * The below are zeroed by get_page_state(). Use get_full_page_state()
	 * to add up all these.
	 */
	unsigned long pgpgin;		/* Disk reads */
	unsigned long pgpgout;		/* Disk writes */
	unsigned long pswpin;		/* swap reads */
	unsigned long pswpout;		/* swap writes */

	unsigned long pgalloc_high;	/* page allocations */
	unsigned long pgalloc_normal;
	unsigned long pgalloc_dma32;
	unsigned long pgalloc_dma;

	unsigned long pgfree;		/* page freeings */
	unsigned long pgactivate;	/* pages moved inactive->active */
	unsigned long pgdeactivate;	/* pages moved active->inactive */

	unsigned long pgfault;		/* faults (major+minor) */
	unsigned long pgmajfault;	/* faults (major only) */

	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
	unsigned long pgrefill_normal;
	unsigned long pgrefill_dma32;
	unsigned long pgrefill_dma;

	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
	unsigned long pgsteal_normal;
	unsigned long pgsteal_dma32;
	unsigned long pgsteal_dma;

	unsigned long pgscan_kswapd_high;/* total highmem pages scanned by kswapd */
	unsigned long pgscan_kswapd_normal;
	unsigned long pgscan_kswapd_dma32;
	unsigned long pgscan_kswapd_dma;

	unsigned long pgscan_direct_high;/* total highmem pages scanned in direct reclaim */
	unsigned long pgscan_direct_normal;
	unsigned long pgscan_direct_dma32;
	unsigned long pgscan_direct_dma;

	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
	unsigned long slabs_scanned;	/* slab objects scanned */
	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
	unsigned long allocstall;	/* direct reclaim calls */

	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
	unsigned long nr_bounce;	/* pages for bounce buffers */
};

extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

#define read_page_state(member) \
	read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta) \
	mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta) \
	__mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member)		mod_page_state(member, 1UL)
#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
#define add_page_state(member, delta)	mod_page_state(member, (delta))
#define sub_page_state(member, delta)	mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)	__mod_page_state(member, 1UL)
#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
#define __add_page_state(member, delta)	__mod_page_state(member, (delta))
#define __sub_page_state(member, delta)	__mod_page_state(member, 0UL - (delta))

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

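/*
 * Illustrative sketch, not part of the original header: a fault path
 * could bump the event counters as below. Per the comment above,
 * inc_page_state() is safe in any context (it protects against
 * interrupts); __inc_page_state() is only safe with interrupts off.
 * "fault_was_major" is a hypothetical caller-local flag.
 *
 *	inc_page_state(pgfault);
 *	if (fault_was_major)
 *		inc_page_state(pgmajfault);
 */
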
#define state_zone_offset(zone, member)					\
({									\
	unsigned offset;						\
	if (is_highmem(zone))						\
		offset = offsetof(struct page_state, member##_high);	\
	else if (is_normal(zone))					\
		offset = offsetof(struct page_state, member##_normal);	\
	else if (is_dma32(zone))					\
		offset = offsetof(struct page_state, member##_dma32);	\
	else								\
		offset = offsetof(struct page_state, member##_dma);	\
	offset;								\
})

#define __mod_page_state_zone(zone, member, delta)			\
do {									\
	__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta)			\
do {									\
	mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

DECLARE_PER_CPU(struct page_state, page_states);

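/*
 * Illustrative sketch, not taken verbatim from any call site: the member
 * name is token-pasted with a zone suffix by state_zone_offset(), so
 * "pgalloc" becomes pgalloc_normal, pgalloc_dma, etc. "zone", "order"
 * and "flags" are assumed caller locals; the __ variant requires
 * interrupts to be disabled.
 *
 *	local_irq_save(flags);
 *	__mod_page_state_zone(zone, pgalloc, 1UL << order);
 *	local_irq_restore(flags);
 */
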
/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

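/*
 * Illustrative sketch, not the actual fold code: applying a per cpu
 * differential of +3 for NR_WRITEBACK updates the zone counter and the
 * global total in one step ("zone" is an assumed caller local).
 *
 *	zone_page_state_add(3, zone, NR_WRITEBACK);
 */
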
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

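/*
 * Illustrative sketch, not part of the original header: with
 * nr_writeback now a zoned counter, a caller may read the machine-wide
 * total or one zone's share ("zone" is an assumed caller local). The
 * clamp to zero above exists because per cpu differentials can leave
 * the atomic sums transiently negative on SMP.
 *
 *	unsigned long total = global_page_state(NR_WRITEBACK);
 *	unsigned long here = zone_page_state(zone, NR_WRITEBACK);
 */
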
#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
#else
#define node_page_state(node, item) global_page_state(item)
#endif

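/*
 * Illustrative sketch of a hypothetical caller: because the !NUMA case
 * falls back to global_page_state(), the same call works in either
 * configuration.
 *
 *	unsigned long nr_wb = node_page_state(numa_node_id(), NR_WRITEBACK);
 */
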
#define __add_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)	\
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

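/*
 * Illustrative sketch of the accounting this patch enables (the exact
 * call sites live in the writeback code, not shown here): an increment
 * when a page enters writeback is paired with a decrement on
 * completion; "page" is an assumed caller local.
 *
 *	inc_zone_page_state(page, NR_WRITEBACK);
 *	...
 *	dec_zone_page_state(page, NR_WRITEBACK);
 */
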
extern void inc_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_inc(&page_zone(page)->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif

#endif /* _LINUX_VMSTAT_H */