include/linux/vmstat.h (commit f6ac2354)
[PATCH] zoned vm counters: create vmstat.c/.h from page_alloc.c/.h

#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>

/*
 * Global page accounting. One instance per CPU. Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (which protects the instance from modification by
 *   interrupt).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can also be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context. In this case, the field should be
 *   commented here.
 */
struct page_state {
	unsigned long nr_dirty;		/* Dirty writeable pages */
	unsigned long nr_writeback;	/* Pages under writeback */
	unsigned long nr_unstable;	/* NFS unstable pages */
	unsigned long nr_page_table_pages;/* Pages used for pagetables */
	unsigned long nr_mapped;	/* mapped into pagetables.
					 * only modified from process context */
	unsigned long nr_slab;		/* In slab */
#define GET_PAGE_STATE_LAST nr_slab

	/*
	 * The below are zeroed by get_page_state(). Use get_full_page_state()
	 * to add up all these.
	 */
	unsigned long pgpgin;		/* Disk reads */
	unsigned long pgpgout;		/* Disk writes */
	unsigned long pswpin;		/* swap reads */
	unsigned long pswpout;		/* swap writes */

	unsigned long pgalloc_high;	/* page allocations */
	unsigned long pgalloc_normal;
	unsigned long pgalloc_dma32;
	unsigned long pgalloc_dma;

	unsigned long pgfree;		/* page freeings */
	unsigned long pgactivate;	/* pages moved inactive->active */
	unsigned long pgdeactivate;	/* pages moved active->inactive */

	unsigned long pgfault;		/* faults (major+minor) */
	unsigned long pgmajfault;	/* faults (major only) */

	unsigned long pgrefill_high;	/* inspected in refill_inactive_zone */
	unsigned long pgrefill_normal;
	unsigned long pgrefill_dma32;
	unsigned long pgrefill_dma;

	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
	unsigned long pgsteal_normal;
	unsigned long pgsteal_dma32;
	unsigned long pgsteal_dma;

	unsigned long pgscan_kswapd_high;/* highmem pages scanned by kswapd */
	unsigned long pgscan_kswapd_normal;
	unsigned long pgscan_kswapd_dma32;
	unsigned long pgscan_kswapd_dma;

	unsigned long pgscan_direct_high;/* highmem pages scanned by direct reclaim */
	unsigned long pgscan_direct_normal;
	unsigned long pgscan_direct_dma32;
	unsigned long pgscan_direct_dma;

	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
	unsigned long slabs_scanned;	/* slab objects scanned */
	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
	unsigned long allocstall;	/* direct reclaim calls */

	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
	unsigned long nr_bounce;	/* pages for bounce buffers */
};

extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

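/*
 * Illustrative use (a sketch, not part of this interface): taking a
 * snapshot of the counters. Per the comment in struct page_state,
 * get_page_state() only fills in the fields up to GET_PAGE_STATE_LAST,
 * while get_full_page_state() sums every field across CPUs:
 *
 *	struct page_state ps;
 *
 *	get_full_page_state(&ps);
 *	printk("pgpgin %lu pgpgout %lu\n", ps.pgpgin, ps.pgpgout);
 */
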
#define read_page_state(member) \
	read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta)	\
	mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta)	\
	__mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member)		mod_page_state(member, 1UL)
#define dec_page_state(member)		mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta)	mod_page_state(member, (delta))
#define sub_page_state(member,delta)	mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)	__mod_page_state(member, 1UL)
#define __dec_page_state(member)	__mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta)	__mod_page_state(member, (delta))
#define __sub_page_state(member,delta)	__mod_page_state(member, 0UL - (delta))
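
/*
 * Usage sketch (illustrative only). The plain variants are safe from any
 * context; the __ variants may be used under the rules given at the top
 * of this file, e.g. with interrupts already disabled:
 *
 *	inc_page_state(pgfault);		interrupts may be on
 *
 *	local_irq_save(flags);
 *	__inc_page_state(pgmajfault);		irqs known to be off
 *	local_irq_restore(flags);
 *
 * Note that the dec/sub forms rely on unsigned wraparound: adding
 * 0UL - delta to an unsigned long is equivalent to subtracting delta.
 */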

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

#define state_zone_offset(zone, member) \
({ \
	unsigned offset; \
	if (is_highmem(zone)) \
		offset = offsetof(struct page_state, member##_high); \
	else if (is_normal(zone)) \
		offset = offsetof(struct page_state, member##_normal); \
	else if (is_dma32(zone)) \
		offset = offsetof(struct page_state, member##_dma32); \
	else \
		offset = offsetof(struct page_state, member##_dma); \
	offset; \
})
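
/*
 * state_zone_offset() selects one of the four per-zone fields by pasting
 * the zone suffix onto the member name. For example (illustrative):
 *
 *	state_zone_offset(zone, pgalloc)
 *
 * yields offsetof(struct page_state, pgalloc_high) for a highmem zone,
 * pgalloc_normal for ZONE_NORMAL, and so on for dma32 and dma.
 */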

#define __mod_page_state_zone(zone, member, delta)			\
do {									\
	__mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)

#define mod_page_state_zone(zone, member, delta)			\
do {									\
	mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
} while (0)
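
/*
 * Example (a sketch of a possible caller, not taken from this header):
 * an allocator path that already runs with interrupts disabled could
 * account a higher-order allocation against its zone with:
 *
 *	__mod_page_state_zone(zone, pgalloc, 1UL << order);
 */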

DECLARE_PER_CPU(struct page_state, page_states);
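
/*
 * One struct page_state per CPU. The offset-based accessors above index
 * into the current CPU's instance by byte offset, roughly (a sketch of
 * the idea, not necessarily the exact implementation):
 *
 *	unsigned long *p = (void *)&__get_cpu_var(page_states) + offset;
 *	*p += delta;
 */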

#endif /* _LINUX_VMSTAT_H */