/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>

#include <asm/tlbflush.h>

/* add this memory to the iomem "System RAM" resource tree */
static struct resource *register_memory_resource(u64 start, u64 size)
{
        struct resource *res;

        res = kzalloc(sizeof(struct resource), GFP_KERNEL);
        BUG_ON(!res);

        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
        res->flags = IORESOURCE_MEM;
        if (request_resource(&iomem_resource, res) < 0) {
                printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
                       (unsigned long long)res->start,
                       (unsigned long long)res->end);
                kfree(res);
                res = NULL;
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
}
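
/*
 * Usage sketch (hypothetical caller; the real caller is add_memory()
 * below): register the range up front so a colliding hot-add fails
 * early, and release it on any later failure so the iomem tree stays
 * consistent.
 *
 *      struct resource *res = register_memory_resource(start, size);
 *      if (!res)
 *              return -EEXIST;
 *      ret = do_the_rest_of_the_hotadd();      (hypothetical helper)
 *      if (ret)
 *              release_memory_resource(res);
 */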

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;

        /* the zone's index within the node's zone array */
        zone_type = zone - pgdat->node_zones;
        if (!zone->wait_table) {
                int ret;

                ret = init_currently_empty_zone(zone, phys_start_pfn,
                                                nr_pages, MEMMAP_HOTPLUG);
                if (ret < 0)
                        return ret;
        }
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
        return 0;
}

static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
        int nr_pages = PAGES_PER_SECTION;
        int ret;

        /* the section is already present; the caller treats -EEXIST as benign */
        if (pfn_valid(phys_start_pfn))
                return -EEXIST;

        ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
        if (ret < 0)
                return ret;

        ret = __add_zone(zone, phys_start_pfn);
        if (ret < 0)
                return ret;

        return register_new_memory(__pfn_to_section(phys_start_pfn));
}
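
/*
 * Note on granularity: sparse memory hotplug works in units of whole
 * sections.  For example, with 4 KiB pages and 128 MiB sections (the
 * x86_64 layout of this era, SECTION_SIZE_BITS == 27):
 *
 *      PAGES_PER_SECTION == 1 << (27 - 12) == 32768
 *
 * so each __add_section() call populates 128 MiB worth of mem_map.
 */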

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
                unsigned long nr_pages)
{
        unsigned long i;
        int err = 0;
        int start_sec, end_sec;

        /* while initializing the mem_map, align the hot-added range to sections */
        start_sec = pfn_to_section_nr(phys_start_pfn);
        end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

        for (i = start_sec; i <= end_sec; i++) {
                err = __add_section(zone, i << PFN_SECTION_SHIFT);

                /*
                 * -EEXIST is finally dealt with by the ioresource
                 * collision check; see add_memory() =>
                 * register_memory_resource().  A warning is printed
                 * if there is a collision.
                 */
                if (err && (err != -EEXIST))
                        break;
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
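
/*
 * Usage sketch (hypothetical; the real callers are the arch_add_memory()
 * implementations of hotplug-capable architectures): pick the target
 * zone first, then hand the pfn range to __add_pages().
 *
 *      int arch_add_memory(int nid, u64 start, u64 size)
 *      {
 *              struct pglist_data *pgdat = NODE_DATA(nid);
 *              struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *              unsigned long start_pfn = start >> PAGE_SHIFT;
 *              unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *              return __add_pages(zone, start_pfn, nr_pages);
 *      }
 */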

static void grow_zone_span(struct zone *zone,
                           unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long old_zone_end_pfn;

        zone_span_writelock(zone);

        old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
        if (start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
                              zone->zone_start_pfn;

        zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
                            unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long old_pgdat_end_pfn =
                pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
                                    pgdat->node_start_pfn;
}
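
/*
 * Worked example of the span arithmetic above (made-up numbers): a zone
 * spanning pfns [0x10000, 0x20000) that has [0x20000, 0x28000) hot-added
 * keeps zone_start_pfn == 0x10000 and grows spanned_pages to
 * max(0x20000, 0x28000) - 0x10000 == 0x18000.  A range added below the
 * current start instead pulls zone_start_pfn down, and the same
 * subtraction widens the span from the new start.
 */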

int online_pages(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long i;
        unsigned long flags;
        unsigned long onlined_pages = 0;
        struct resource res;
        u64 section_end;
        unsigned long start_pfn;
        struct zone *zone;
        int need_zonelists_rebuild = 0;

        /*
         * This doesn't need a lock to do pfn_to_page().
         * The section can't be removed here because of the
         * memory_block->state_sem.
         */
        zone = page_zone(pfn_to_page(pfn));
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        grow_zone_span(zone, pfn, pfn + nr_pages);
        grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);

        /*
         * If this zone is not populated, it is not in the zonelists and
         * the page allocator ignores it.  The zonelists must therefore
         * be rebuilt after the pages are onlined.
         */
        if (!populated_zone(zone))
                need_zonelists_rebuild = 1;

        res.start = (u64)pfn << PAGE_SHIFT;
        res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
        res.flags = IORESOURCE_MEM;     /* we just need System RAM */
        section_end = res.end;

        while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
                start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
                nr_pages = (unsigned long)
                           ((res.end + 1 - res.start) >> PAGE_SHIFT);

                if (PageReserved(pfn_to_page(start_pfn))) {
                        /* this region's pages are not onlined yet */
                        for (i = 0; i < nr_pages; i++) {
                                struct page *page = pfn_to_page(start_pfn + i);
                                online_page(page);
                                onlined_pages++;
                        }
                }

                res.start = res.end + 1;
                res.end = section_end;
        }
        zone->present_pages += onlined_pages;
        zone->zone_pgdat->node_present_pages += onlined_pages;

        setup_per_zone_pages_min();

        if (need_zonelists_rebuild)
                build_all_zonelists();
        vm_total_pages = nr_free_pagecache_pages();
        writeback_set_ratelimit();
        return 0;
}
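
/*
 * Note: online_pages() is normally not called directly.  It runs when
 * userspace (often udev) flips a hot-added memory block's sysfs state
 * file, e.g.:
 *
 *      echo online > /sys/devices/system/memory/memoryN/state
 *
 * where memoryN stands for whichever block the hot-add created.
 */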
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
        unsigned long zholes_size[MAX_NR_ZONES] = {0};
        unsigned long start_pfn = start >> PAGE_SHIFT;

        pgdat = arch_alloc_nodedata(nid);
        if (!pgdat)
                return NULL;

        arch_refresh_nodedata(nid, pgdat);

        /* we can use NODE_DATA(nid) from here */

        /* init the node's zones as empty zones; we don't have any present pages */
        free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

        return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
        arch_refresh_nodedata(nid, NULL);
        arch_free_nodedata(pgdat);
}

int add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat = NULL;
        int new_pgdat = 0;
        struct resource *res;
        int ret;

        res = register_memory_resource(start, size);
        if (!res)
                return -EEXIST;

        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
                if (!pgdat) {
                        ret = -ENOMEM;
                        goto error;
                }
                new_pgdat = 1;
                ret = kswapd_run(nid);
                if (ret)
                        goto error;
        }

        /* call the arch's memory hot-add hook */
        ret = arch_add_memory(nid, start, size);
        if (ret < 0)
                goto error;

        /* we online the node here; there is no rolling back from this point */
        node_set_online(nid);

        cpuset_track_online_nodes();

        if (new_pgdat) {
                ret = register_one_node(nid);
                /*
                 * If the sysfs file of the new node can't be created,
                 * CPUs on the node can't be hot-added.  There is no
                 * rollback path at this point, so check with BUG_ON()
                 * to catch it, reluctantly.
                 */
                BUG_ON(ret);
        }

        return ret;
error:
        /* roll back the pgdat allocation and the rest */
        if (new_pgdat)
                rollback_node_hotadd(nid, pgdat);
        if (res)
                release_memory_resource(res);

        return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
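
/*
 * Usage sketch (hypothetical values; the in-tree caller of this era is
 * the ACPI memory hotplug driver, drivers/acpi/acpi_memhotplug.c): map
 * the new range to a node, then hand it to add_memory().
 *
 *      int nid = 0;                      (usually derived from firmware)
 *      u64 start = 0x100000000ULL;       (example: hot-add at 4 GiB)
 *      u64 size = 0x10000000ULL;         (example: a 256 MiB DIMM)
 *      int ret = add_memory(nid, start, size);
 *
 *      if (ret)
 *              printk(KERN_ERR "add_memory() failed: %d\n", ret);
 */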