/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock;

int memblock_debug;
int memblock_can_resize;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];

#define MEMBLOCK_ERROR	(~(phys_addr_t)0)

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

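/*
 * These helpers assume a power-of-two "size"; the ~(size - 1) mask trick
 * is only correct in that case. Worked example with size = 0x1000:
 *
 *	memblock_align_down(0x1234, 0x1000) == 0x1000
 *	memblock_align_up(0x1234, 0x1000)   == 0x2000
 */
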
static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
					    phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
				    phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

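/*
 * Illustration of the return values above, for the ranges
 * [0x1000, 0x2000) and [0x2000, 0x3000):
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000)  == 0
 *	memblock_addrs_adjacent(0x1000, 0x1000, 0x2000, 0x1000) == 1
 *	memblock_addrs_adjacent(0x2000, 0x1000, 0x1000, 0x1000) == -1
 *
 * i.e. adjacency is 1 when the second range starts exactly where the
 * first one ends, -1 for the mirror case, and 0 otherwise.
 */
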
static long memblock_regions_adjacent(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

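/*
 * In other words, the return value is the index of the first region in
 * "type" that intersects [base, base + size), or -1 if none does;
 * memblock_find_region() below uses that index to hop directly past the
 * conflicting reservation.
 */
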
/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					       phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}

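/*
 * Worked example of the top-down walk (illustrative numbers): searching
 * [0x0, 0x10000) for 0x1000 bytes at 0x1000 alignment, with
 * [0xe000, 0xf800) already reserved. The first candidate is
 * 0xf000 = align_down(0x10000 - 0x1000); [0xf000, 0x10000) overlaps the
 * reservation, so the walk retries at align_down(0xe000 - 0x1000) =
 * 0xd000, which is free and is returned.
 */
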
static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align,
					     phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Translate MEMBLOCK_ALLOC_ACCESSIBLE into the current limit */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search; this tends to limit memory
	 * fragmentation by keeping early boot allocations near the
	 * top of memory.
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

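/*
 * Note: passing end == MEMBLOCK_ALLOC_ACCESSIBLE means "anywhere below
 * memblock.current_limit"; the default allocators further down use it
 * to bound their searches.
 */
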
static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
				      unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation.
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we use MEMBLOCK itself for the allocation. That
	 * means this is unsafe to use while bootmem is active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when
	 * slab is up for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]\n",
		     memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}

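/*
 * A minimal sketch of when the doubling triggers (INIT_MEMBLOCK_REGIONS
 * comes from <linux/memblock.h>; 128 below is purely illustrative): once
 * an insertion makes type->cnt reach type->max, memblock_add_region()
 * calls memblock_double_array() and the array grows from 128 to 256
 * entries, allocated via kmalloc() when slab is up, or carved out of
 * memblock itself otherwise.
 */
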
/* Weak default: architectures may override this to veto coalescing */
extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					       phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try and coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region.
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
								       type->regions[i].size,
								       type->regions[i+1].base,
								       type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array,
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* Is the array full? Try to resize it. If that fails, undo
	 * our insertion and return an error.
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}

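/*
 * Worked example of the coalescing above: with the table holding
 * [0x0, 0x1000) and [0x2000, 0x3000), adding [0x1000, 0x2000) first
 * merges into the lower neighbour (the adjacent < 0 case) to give
 * [0x0, 0x2000), then the plugged-hole check merges that with
 * [0x2000, 0x3000), leaving a single region [0x0, 0x3000).
 */
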
long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}

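/*
 * Example of the split case: removing [0x2000, 0x3000) from a region
 * [0x1000, 0x4000) shrinks the existing entry to [0x1000, 0x2000) and
 * then re-adds the tail [0x3000, 0x4000) as a separate region.
 */
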
long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc.
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

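/*
 * Typical early-boot usage (an illustrative sketch; SZ_1M and SZ_64K
 * stand in for whatever size and alignment a caller actually needs):
 *
 *	phys_addr_t buf = memblock_alloc(SZ_1M, SZ_64K);
 *
 * memblock_alloc() panics on failure via memblock_alloc_base(), so the
 * result needs no error check; call __memblock_alloc_base() directly if
 * failure must be handled, and memblock_free() to return the space.
 */
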
/*
 * Additional node-local allocators. The search for node memory is bottom-up
 * and walks memblock regions within that node bottom-up as well, but the
 * allocation within a memblock region is top-down. XXX I plan to fix that
 * at some stage.
 *
 * WARNING: Only available after early_node_map[] has been populated;
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc, which really wants us to walk by
	 * addresses and return the nid. This is not very convenient for
	 * early_pfn_map[] users, as the map isn't sorted yet and it really
	 * wants to be walked by nid.
	 *
	 * For now, implement the inefficient method below, which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for
	 * both cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
						    phys_addr_t size,
						    phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc.
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works.
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
							    size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}

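/*
 * Illustrative use on a NUMA machine: per-node scratch buffers that
 * should live on their own node when possible, falling back to any
 * memory otherwise (buf[] is a caller-side array and the size/alignment
 * are example choices):
 *
 *	for (nid = 0; nid < nr_node_ids; nid++)
 *		buf[nid] = memblock_alloc_try_nid(SZ_64K, PAGE_SIZE, nid);
 */
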
/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

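/*
 * Worked example: with memory regions [0x0, 0x40000000) and
 * [0x100000000, 0x140000000) and a 1.5G limit (e.g. from "mem=" handling
 * in arch code), the first region consumes 1G of the budget and the
 * second is truncated to [0x100000000, 0x120000000); any reservation
 * beyond the new end of DRAM is then clipped or dropped.
 */
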
/* Binary search; relies on regions[] being sorted by base address,
 * which memblock_add_region() maintains.
 */
static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
			name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check the marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from now on */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	/* Hook up the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero-size MEMBLOCK which will get coalesced away
	 * later. This simplifies the memblock_add() code.
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}

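/*
 * Sketch of the boot-time call sequence an architecture's setup code is
 * expected to follow (function names are from this file; the base/size
 * arguments are placeholders):
 *
 *	memblock_init();			// hook up the static arrays
 *	memblock_add(base, size);		// register each RAM bank
 *	memblock_reserve(kbase, ksize);		// protect kernel, initrd, ...
 *	memblock_analyze();			// sum memory_size, allow resizing
 *	ptr = __va(memblock_alloc(sz, align));	// early allocations from now on
 */
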
/* Enable debug output with "memblock=debug" on the kernel command line */
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#ifdef CONFIG_DEBUG_FS

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

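/*
 * The files created below show up as /sys/kernel/debug/memblock/memory
 * and /sys/kernel/debug/memblock/reserved (assuming debugfs is mounted
 * at the conventional /sys/kernel/debug), with one "index: start..end"
 * line per region as printed by memblock_debug_show().
 */
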
static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */