/*
 * include/linux/memory_hotplug.h
 *
 * Interfaces for memory hot-plug: pgdat/zone resize locking, page
 * onlining/offlining, node hot-add, and sparse-section add/remove.
 */
1#ifndef __LINUX_MEMORY_HOTPLUG_H
2#define __LINUX_MEMORY_HOTPLUG_H
3
4#include <linux/mmzone.h>
5#include <linux/spinlock.h>
3947be19 6#include <linux/notifier.h>
208d54e5 7
78679302
KH
8struct page;
9struct zone;
10struct pglist_data;
ea01ea93 11struct mem_section;
78679302 12
208d54e5 13#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Types for free bootmem.
 * The normal smallest mapcount is -1.  Here is smaller value than it.
 * (These tags must therefore never collide with a real mapcount.)
 */
#define SECTION_INFO		(-1 - 1)
#define MIX_SECTION_INFO	(-1 - 2)
#define NODE_INFO		(-1 - 3)
04753278 22
208d54e5
DH
23/*
24 * pgdat resizing functions
25 */
26static inline
27void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
28{
29 spin_lock_irqsave(&pgdat->node_size_lock, *flags);
30}
31static inline
32void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
33{
bdc8cb98 34 spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
208d54e5
DH
35}
36static inline
37void pgdat_resize_init(struct pglist_data *pgdat)
38{
39 spin_lock_init(&pgdat->node_size_lock);
40}
bdc8cb98
DH
41/*
42 * Zone resizing functions
43 */
44static inline unsigned zone_span_seqbegin(struct zone *zone)
45{
46 return read_seqbegin(&zone->span_seqlock);
47}
48static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
49{
50 return read_seqretry(&zone->span_seqlock, iv);
51}
52static inline void zone_span_writelock(struct zone *zone)
53{
54 write_seqlock(&zone->span_seqlock);
55}
56static inline void zone_span_writeunlock(struct zone *zone)
57{
58 write_sequnlock(&zone->span_seqlock);
59}
60static inline void zone_seqlock_init(struct zone *zone)
61{
62 seqlock_init(&zone->span_seqlock);
63}
3947be19
DH
64extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
65extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
66extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
67/* need some defines for these for archs that don't support it */
68extern void online_page(struct page *page);
69/* VM interface that may be used by firmware interface */
3947be19 70extern int online_pages(unsigned long, unsigned long);
0c0e6195 71extern void __offline_isolated_pages(unsigned long, unsigned long);
48e94196 72
49ac8255
KH
73#ifdef CONFIG_MEMORY_HOTREMOVE
74extern bool is_pageblock_removable_nolock(struct page *page);
75#endif /* CONFIG_MEMORY_HOTREMOVE */
76
3947be19 77/* reasonably generic interface to expand the physical pages in a zone */
c04fc586 78extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
3947be19 79 unsigned long nr_pages);
ea01ea93
BP
80extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
81 unsigned long nr_pages);
bc02af93
YG
82
83#ifdef CONFIG_NUMA
84extern int memory_add_physaddr_to_nid(u64 start);
85#else
86static inline int memory_add_physaddr_to_nid(u64 start)
87{
88 return 0;
89}
90#endif
91
306d6cbe
YG
92#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
93/*
94 * For supporting node-hotadd, we have to allocate a new pgdat.
95 *
96 * If an arch has generic style NODE_DATA(),
97 * node_data[nid] = kzalloc() works well. But it depends on the architecture.
98 *
99 * In general, generic_alloc_nodedata() is used.
100 * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
101 *
102 */
dd0932d9
YG
103extern pg_data_t *arch_alloc_nodedata(int nid);
104extern void arch_free_nodedata(pg_data_t *pgdat);
7049027c 105extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
306d6cbe
YG
106
107#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
108
/* No arch-specific nodedata extension: fall back to the generic helpers. */
#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)
111
112#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
 * XXX: kmalloc_node() can't work well to get new node's memory at this time.
 *	Because, pgdat for the new node is not allocated/initialized yet itself.
 *	To use new node's memory, more consideration will be necessary.
 */
#define generic_alloc_nodedata(nid)			\
({							\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);		\
})
/*
 * This definition is just for error path in node hotadd.
 * For node hotremove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)
128
10ad400b
YG
129extern pg_data_t *node_data[];
130static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
131{
132 node_data[nid] = pgdat;
133}
134
306d6cbe
YG
135#else /* !CONFIG_NUMA */
136
137/* never called */
138static inline pg_data_t *generic_alloc_nodedata(int nid)
139{
140 BUG();
141 return NULL;
142}
143static inline void generic_free_nodedata(pg_data_t *pgdat)
144{
145}
10ad400b
YG
146static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
147{
148}
306d6cbe
YG
149#endif /* CONFIG_NUMA */
150#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
151
04753278
YG
152#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* With SPARSEMEM_VMEMMAP there is no section bootmem info to register. */
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
/* SPARSEMEM_VMEMMAP stub: no bootmem reference to drop. */
static inline void put_page_bootmem(struct page *page)
{
}
159#else
160extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
161extern void put_page_bootmem(struct page *page);
162#endif
163
208d54e5
DH
164#else /* ! CONFIG_MEMORY_HOTPLUG */
165/*
166 * Stub functions for when hotplug is off
167 */
/* Hotplug-off stubs: the pgdat span cannot change, so no locking needed. */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
bdc8cb98
DH
171
/* Hotplug-off stubs: zone spans are immutable, so the seqlock read side
 * always reports "no retry needed" and the write side is a no-op. */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
3947be19
DH
183
184static inline int mhp_notimplemented(const char *func)
185{
186 printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
187 dump_stack();
188 return -ENOSYS;
189}
190
04753278
YG
/* Hotplug-off stub: no bootmem info tracking without memory hotplug. */
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
194
bdc8cb98 195#endif /* ! CONFIG_MEMORY_HOTPLUG */
9d99aaa3 196
5c755e9f
BP
197#ifdef CONFIG_MEMORY_HOTREMOVE
198
199extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
200
201#else
/*
 * Without CONFIG_MEMORY_HOTREMOVE no memory section is ever removable;
 * report 0 (not removable) for any pfn range.
 */
static inline int is_mem_section_removable(unsigned long pfn,
					unsigned long nr_pages)
{
	return 0;
}
207#endif /* CONFIG_MEMORY_HOTREMOVE */
208
cf23422b 209extern int mem_online_node(int nid);
bc02af93
YG
210extern int add_memory(int nid, u64 start, u64 size);
211extern int arch_add_memory(int nid, u64 start, u64 size);
9d99aaa3 212extern int remove_memory(u64 start, u64 size);
f28c5edc
KM
213extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
214 int nr_pages);
ea01ea93 215extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
04753278
YG
216extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
217 unsigned long pnum);
9d99aaa3 218
208d54e5 219#endif /* __LINUX_MEMORY_HOTPLUG_H */