/*
 * include/linux/memory_hotplug.h
 *
 * Interfaces for memory hot-add / hot-remove (CONFIG_MEMORY_HOTPLUG).
 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

/* Forward declarations: this header only uses pointers to these types. */
struct page;
struct zone;
struct pglist_data;
struct mem_section;

#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * Magic values used to tag memory allocated from bootmem so that it can be
 * recognized (and freed) later.  The normal smallest mapcount is -1, so
 * these values are deliberately smaller than -1 and cannot collide with a
 * real mapcount.
 */
#define SECTION_INFO		(-1 - 1)
#define MIX_SECTION_INFO	(-1 - 2)
#define NODE_INFO		(-1 - 3)

/*
 * pgdat resizing functions
 *
 * Hot-plug can change a node's size fields; these wrappers serialize such
 * updates via pgdat->node_size_lock.  IRQs are disabled across the critical
 * section; *flags receives the saved IRQ state.
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
/* Initialize the lock; call once while setting up the pgdat. */
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
/*
 * Zone resizing functions
 *
 * A zone's span (start pfn / number of pages) can change under hot-plug.
 * Readers use the seqlock retry pattern:
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		...
 *	} while (zone_span_seqretry(zone, seq));
 *
 * Writers bracket span updates with zone_span_writelock()/writeunlock().
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* need some defines for these for archs that don't support it */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
/*
 * NOTE(review): the three unsigned long arguments are start pfn, number of
 * pages, and (presumably) a timeout -- confirm against the definition in
 * mm/memory_hotplug.c before relying on this.
 */
extern int offline_pages(unsigned long, unsigned long, unsigned long);

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);
extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);

#ifdef CONFIG_NUMA
/* Map the physical address of hot-added memory to a node id. */
extern int memory_add_physaddr_to_nid(u64 start);
#else
/* Without NUMA all memory belongs to node 0. */
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * For supporting node-hotadd, we have to allocate a new pgdat.
 *
 * If an arch has a generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well.  But it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * Currently, arch_free_nodedata() is only defined for the error path of
 * node hot-add.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this is used to allocate the pgdat.
 * XXX: kmalloc_node() cannot be used to get the new node's memory at this
 * point, because the pgdat for the new node is not allocated/initialized
 * yet itself.  Using the new node's own memory needs more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);			\
})
/*
 * This definition is just for the error path in node hot-add.
 * For node hot-remove, this has to be replaced.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
/* Publish the (re)allocated pgdat in the generic node_data[] table. */
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

/*
 * With SPARSEMEM_VMEMMAP there is no bootmem-allocated section info to
 * register, so these are no-ops; otherwise the real implementations are
 * provided out of line.
 */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
static inline void put_page_bootmem(struct page *page)
{
}
#else
extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page);
#endif

#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

/* Zone spans never change without hotplug; readers never need to retry. */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

/*
 * Loudly report a hotplug entry point being called in a kernel built
 * without hotplug support: warn, dump the stack, and return -ENOSYS.
 */
static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}

#endif /* ! CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);

#else
/* Without hot-remove support no section is ever removable. */
static inline int is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */


/* Top-level hot-add/hot-remove entry points (available with or without
 * CONFIG_MEMORY_HOTPLUG; they may return errors when support is absent). */
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);

/* Sparsemem section management used by the hotplug implementation. */
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
								int nr_pages);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);

#endif /* __LINUX_MEMORY_HOTPLUG_H */