Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * linux/mm/percpu.c - percpu memory allocator | |
3 | * | |
4 | * Copyright (C) 2009 SUSE Linux Products GmbH | |
5 | * Copyright (C) 2009 Tejun Heo <tj@kernel.org> | |
6 | * | |
7 | * This file is released under the GPLv2. | |
8 | * | |
9 | * This is the percpu allocator which can handle both static and | |
10 | * dynamic areas. Percpu areas are allocated in chunks in the vmalloc | |
11 | * area. Each chunk consists of num_possible_cpus() units and the | |
12 | * first chunk is used for static percpu variables in the kernel image | |
13 | * (special boot time alloc/init handling is necessary as these areas | |
14 | * need to be brought up before allocation services are running). | |
15 | * Units grow as necessary and all units grow or shrink in unison. | |
16 | * When a chunk is filled up, another chunk is allocated, i.e. in the vmalloc area: | |
17 | * | |
18 | * c0 c1 c2 | |
19 | * ------------------- ------------------- ------------ | |
20 | * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u | |
21 | * ------------------- ...... ------------------- .... ------------ | |
22 | * | |
23 | * Allocation is done in offset-size areas of single unit space. Ie, | |
24 | * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, | |
25 | * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring | |
26 | * percpu base registers pcpu_unit_size apart. | |
27 | * | |
28 | * There are usually many small percpu allocations, many of them as | |
29 | * small as 4 bytes. The allocator organizes chunks into lists | |
30 | * according to free size and tries to allocate from the fullest one. | |
31 | * Each chunk keeps the maximum contiguous area size hint which is | |
32 | * guaranteed to be equal to or larger than the maximum contiguous | |
33 | * area in the chunk. This helps the allocator not to iterate the | |
34 | * chunk maps unnecessarily. | |
35 | * | |
36 | * Allocation state in each chunk is kept using an array of integers | |
37 | * on chunk->map. A positive value in the map represents a free | |
38 | * region and negative allocated. Allocation inside a chunk is done | |
39 | * by scanning this map sequentially and serving the first matching | |
40 | * entry. This is mostly copied from the percpu_modalloc() allocator. | |
41 | * Chunks can be determined from the address using the index field | |
42 | * in the page struct. The index field contains a pointer to the chunk. | |
43 | * | |
44 | * To use this allocator, arch code should do the following. | |
45 | * | |
46 | * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA | |
47 | * | |
48 | * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate | |
49 | * regular address to percpu pointer and back if they need to be | |
50 | * different from the default | |
51 | * | |
52 | * - use pcpu_setup_first_chunk() during percpu area initialization to | |
53 | * setup the first chunk containing the kernel static percpu area | |
54 | */ | |
55 | ||
56 | #include <linux/bitmap.h> | |
57 | #include <linux/bootmem.h> | |
58 | #include <linux/list.h> | |
59 | #include <linux/mm.h> | |
60 | #include <linux/module.h> | |
61 | #include <linux/mutex.h> | |
62 | #include <linux/percpu.h> | |
63 | #include <linux/pfn.h> | |
64 | #include <linux/slab.h> | |
65 | #include <linux/spinlock.h> | |
66 | #include <linux/vmalloc.h> | |
67 | #include <linux/workqueue.h> | |
68 | ||
69 | #include <asm/cacheflush.h> | |
70 | #include <asm/sections.h> | |
71 | #include <asm/tlbflush.h> | |
72 | ||
73 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ | |
74 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ | |
75 | ||
76 | /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ | |
77 | #ifndef __addr_to_pcpu_ptr | |
78 | #define __addr_to_pcpu_ptr(addr) \ | |
79 | (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \ | |
80 | + (unsigned long)__per_cpu_start) | |
81 | #endif | |
82 | #ifndef __pcpu_ptr_to_addr | |
83 | #define __pcpu_ptr_to_addr(ptr) \ | |
84 | (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \ | |
85 | - (unsigned long)__per_cpu_start) | |
86 | #endif | |
87 | ||
88 | struct pcpu_chunk { | |
89 | struct list_head list; /* linked to pcpu_slot lists */ | |
90 | int free_size; /* free bytes in the chunk */ | |
91 | int contig_hint; /* max contiguous size hint */ | |
92 | struct vm_struct *vm; /* mapped vmalloc region */ | |
93 | int map_used; /* # of map entries used */ | |
94 | int map_alloc; /* # of map entries allocated */ | |
95 | int *map; /* allocation map */ | |
96 | bool immutable; /* no [de]population allowed */ | |
97 | unsigned long populated[]; /* populated bitmap */ | |
98 | }; | |
99 | ||
100 | static int pcpu_unit_pages __read_mostly; | |
101 | static int pcpu_unit_size __read_mostly; | |
102 | static int pcpu_chunk_size __read_mostly; | |
103 | static int pcpu_nr_slots __read_mostly; | |
104 | static size_t pcpu_chunk_struct_size __read_mostly; | |
105 | ||
106 | /* the address of the first chunk which starts with the kernel static area */ | |
107 | void *pcpu_base_addr __read_mostly; | |
108 | EXPORT_SYMBOL_GPL(pcpu_base_addr); | |
109 | ||
110 | /* | |
111 | * The first chunk which always exists. Note that unlike other | |
112 | * chunks, this one can be allocated and mapped in several different | |
113 | * ways and thus often doesn't live in the vmalloc area. | |
114 | */ | |
115 | static struct pcpu_chunk *pcpu_first_chunk; | |
116 | ||
117 | /* | |
118 | * Optional reserved chunk. This chunk reserves part of the first | |
119 | * chunk and serves it for reserved allocations. The end offset of | |
120 | * the reserved area is in pcpu_reserved_chunk_limit. When the reserved | |
121 | * area doesn't exist, the following variables contain NULL and 0 | |
122 | * respectively. | |
123 | */ | |
124 | static struct pcpu_chunk *pcpu_reserved_chunk; | |
125 | static int pcpu_reserved_chunk_limit; | |
126 | ||
127 | /* | |
128 | * Synchronization rules. | |
129 | * | |
130 | * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former | |
131 | * protects allocation/reclaim paths, chunks, populated bitmap and | |
132 | * vmalloc mapping. The latter is a spinlock and protects the index | |
133 | * data structures - chunk slots, chunks and area maps in chunks. | |
134 | * | |
135 | * During allocation, pcpu_alloc_mutex is kept locked all the time and | |
136 | * pcpu_lock is grabbed and released as necessary. All actual memory | |
137 | * allocations are done using GFP_KERNEL with pcpu_lock released. | |
138 | * | |
139 | * Free path accesses and alters only the index data structures, so it | |
140 | * can be safely called from atomic context. When memory needs to be | |
141 | * returned to the system, free path schedules reclaim_work which | |
142 | * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be | |
143 | * reclaimed, releases both locks and frees the chunks. Note that it's | |
144 | * necessary to grab both locks to remove a chunk from circulation as | |
145 | * allocation path might be referencing the chunk with only | |
146 | * pcpu_alloc_mutex locked. | |
147 | */ | |
148 | static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ | |
149 | static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */ | |
150 | ||
151 | static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ | |
152 | ||
153 | /* reclaim work to release fully free chunks, scheduled from free path */ | |
154 | static void pcpu_reclaim(struct work_struct *work); | |
155 | static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); | |
156 | ||
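| /* map a free size in bytes to an index into the pcpu_slot chunk lists */ | |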
157 | static int __pcpu_size_to_slot(int size) | |
158 | { | |
159 | int highbit = fls(size); /* size is in bytes */ | |
160 | return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); | |
161 | } | |
162 | ||
163 | static int pcpu_size_to_slot(int size) | |
164 | { | |
165 | if (size == pcpu_unit_size) | |
166 | return pcpu_nr_slots - 1; | |
167 | return __pcpu_size_to_slot(size); | |
168 | } | |
169 | ||
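| /* determine which pcpu_slot list @chunk belongs on given its current free space */ | |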
170 | static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) | |
171 | { | |
172 | if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) | |
173 | return 0; | |
174 | ||
175 | return pcpu_size_to_slot(chunk->free_size); | |
176 | } | |
177 | ||
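| /* linear index of @cpu's @page_idx'th page within a chunk */ | |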
178 | static int pcpu_page_idx(unsigned int cpu, int page_idx) | |
179 | { | |
180 | return cpu * pcpu_unit_pages + page_idx; | |
181 | } | |
182 | ||
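| /* kernel virtual address of @cpu's @page_idx'th page in @chunk */ | |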
183 | static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, | |
184 | unsigned int cpu, int page_idx) | |
185 | { | |
186 | return (unsigned long)chunk->vm->addr + | |
187 | (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT); | |
188 | } | |
189 | ||
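| /* struct page backing @cpu's @page_idx'th page of a vmalloc-backed @chunk */ | |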
190 | static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk, | |
191 | unsigned int cpu, int page_idx) | |
192 | { | |
193 | /* must not be used on pre-mapped chunk */ | |
194 | WARN_ON(chunk->immutable); | |
195 | ||
196 | return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx)); | |
197 | } | |
198 | ||
199 | /* set the pointer to a chunk in a page struct */ | |
200 | static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) | |
201 | { | |
202 | page->index = (unsigned long)pcpu; | |
203 | } | |
204 | ||
205 | /* obtain pointer to a chunk from a page struct */ | |
206 | static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) | |
207 | { | |
208 | return (struct pcpu_chunk *)page->index; | |
209 | } | |
210 | ||
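| /* set [*rs, *re) to the next unpopulated page region at or after *rs, bounded by @end; pcpu_next_pop() is the populated counterpart */ | |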
211 | static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end) | |
212 | { | |
213 | *rs = find_next_zero_bit(chunk->populated, end, *rs); | |
214 | *re = find_next_bit(chunk->populated, end, *rs + 1); | |
215 | } | |
216 | ||
217 | static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end) | |
218 | { | |
219 | *rs = find_next_bit(chunk->populated, end, *rs); | |
220 | *re = find_next_zero_bit(chunk->populated, end, *rs + 1); | |
221 | } | |
222 | ||
223 | /* | |
224 | * (Un)populated page region iterators. Iterate over (un)populated | |
225 | * page regions between @start and @end in @chunk. @rs and @re should | |
226 | * be integer variables and will be set to start and end page index of | |
227 | * the current region. | |
228 | */ | |
229 | #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ | |
230 | for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \ | |
231 | (rs) < (re); \ | |
232 | (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end))) | |
233 | ||
234 | #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ | |
235 | for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \ | |
236 | (rs) < (re); \ | |
237 | (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) | |
238 | ||
239 | /** | |
240 | * pcpu_mem_alloc - allocate memory | |
241 | * @size: bytes to allocate | |
242 | * | |
243 | * Allocate @size bytes. If @size is smaller than PAGE_SIZE, | |
244 | * kzalloc() is used; otherwise, vmalloc() is used. The returned | |
245 | * memory is always zeroed. | |
246 | * | |
247 | * CONTEXT: | |
248 | * Does GFP_KERNEL allocation. | |
249 | * | |
250 | * RETURNS: | |
251 | * Pointer to the allocated area on success, NULL on failure. | |
252 | */ | |
253 | static void *pcpu_mem_alloc(size_t size) | |
254 | { | |
255 | if (size <= PAGE_SIZE) | |
256 | return kzalloc(size, GFP_KERNEL); | |
257 | else { | |
258 | void *ptr = vmalloc(size); | |
259 | if (ptr) | |
260 | memset(ptr, 0, size); | |
261 | return ptr; | |
262 | } | |
263 | } | |
264 | ||
265 | /** | |
266 | * pcpu_mem_free - free memory | |
267 | * @ptr: memory to free | |
268 | * @size: size of the area | |
269 | * | |
270 | * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc(). | |
271 | */ | |
272 | static void pcpu_mem_free(void *ptr, size_t size) | |
273 | { | |
274 | if (size <= PAGE_SIZE) | |
275 | kfree(ptr); | |
276 | else | |
277 | vfree(ptr); | |
278 | } | |
279 | ||
280 | /** | |
281 | * pcpu_chunk_relocate - put chunk in the appropriate chunk slot | |
282 | * @chunk: chunk of interest | |
283 | * @oslot: the previous slot it was on | |
284 | * | |
285 | * This function is called after an allocation or free changed @chunk. | |
286 | * New slot according to the changed state is determined and @chunk is | |
287 | * moved to the slot. Note that the reserved chunk is never put on | |
288 | * chunk slots. | |
289 | * | |
290 | * CONTEXT: | |
291 | * pcpu_lock. | |
292 | */ | |
293 | static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) | |
294 | { | |
295 | int nslot = pcpu_chunk_slot(chunk); | |
296 | ||
297 | if (chunk != pcpu_reserved_chunk && oslot != nslot) { | |
298 | if (oslot < nslot) | |
299 | list_move(&chunk->list, &pcpu_slot[nslot]); | |
300 | else | |
301 | list_move_tail(&chunk->list, &pcpu_slot[nslot]); | |
302 | } | |
303 | } | |
304 | ||
305 | /** | |
306 | * pcpu_chunk_addr_search - determine chunk containing specified address | |
307 | * @addr: address for which the chunk needs to be determined. | |
308 | * | |
309 | * RETURNS: | |
310 | * The address of the found chunk. | |
311 | */ | |
312 | static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) | |
313 | { | |
314 | void *first_start = pcpu_first_chunk->vm->addr; | |
315 | ||
316 | /* is it in the first chunk? */ | |
317 | if (addr >= first_start && addr < first_start + pcpu_unit_size) { | |
318 | /* is it in the reserved area? */ | |
319 | if (addr < first_start + pcpu_reserved_chunk_limit) | |
320 | return pcpu_reserved_chunk; | |
321 | return pcpu_first_chunk; | |
322 | } | |
323 | ||
324 | return pcpu_get_page_chunk(vmalloc_to_page(addr)); | |
325 | } | |
326 | ||
327 | /** | |
328 | * pcpu_extend_area_map - extend area map for allocation | |
329 | * @chunk: target chunk | |
330 | * | |
331 | * Extend area map of @chunk so that it can accommodate an allocation. | |
332 | * A single allocation can split an area into three areas, so this | |
333 | * function makes sure that @chunk->map has at least two extra slots. | |
334 | * | |
335 | * CONTEXT: | |
336 | * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired | |
337 | * if area map is extended. | |
338 | * | |
339 | * RETURNS: | |
340 | * 0 if noop, 1 if successfully extended, -errno on failure. | |
341 | */ | |
342 | static int pcpu_extend_area_map(struct pcpu_chunk *chunk) | |
343 | { | |
344 | int new_alloc; | |
345 | int *new; | |
346 | size_t size; | |
347 | ||
348 | /* has enough? */ | |
349 | if (chunk->map_alloc >= chunk->map_used + 2) | |
350 | return 0; | |
351 | ||
352 | spin_unlock_irq(&pcpu_lock); | |
353 | ||
354 | new_alloc = PCPU_DFL_MAP_ALLOC; | |
355 | while (new_alloc < chunk->map_used + 2) | |
356 | new_alloc *= 2; | |
357 | ||
358 | new = pcpu_mem_alloc(new_alloc * sizeof(new[0])); | |
359 | if (!new) { | |
360 | spin_lock_irq(&pcpu_lock); | |
361 | return -ENOMEM; | |
362 | } | |
363 | ||
364 | /* | |
365 | * Acquire pcpu_lock and switch to new area map. Only free | |
366 | * could have happened in between, so map_used couldn't have | |
367 | * grown. | |
368 | */ | |
369 | spin_lock_irq(&pcpu_lock); | |
370 | BUG_ON(new_alloc < chunk->map_used + 2); | |
371 | ||
372 | size = chunk->map_alloc * sizeof(chunk->map[0]); | |
373 | memcpy(new, chunk->map, size); | |
374 | ||
375 | /* | |
376 | * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is | |
377 | * one of the first chunks and still using static map. | |
378 | */ | |
379 | if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC) | |
380 | pcpu_mem_free(chunk->map, size); | |
381 | ||
382 | chunk->map_alloc = new_alloc; | |
383 | chunk->map = new; | |
384 | return 1; | |
385 | } | |
386 | ||
387 | /** | |
388 | * pcpu_split_block - split a map block | |
389 | * @chunk: chunk of interest | |
390 | * @i: index of map block to split | |
391 | * @head: head size in bytes (can be 0) | |
392 | * @tail: tail size in bytes (can be 0) | |
393 | * | |
394 | * Split the @i'th map block into two or three blocks. If @head is | |
395 | * non-zero, @head bytes block is inserted before block @i moving it | |
396 | * to @i+1 and reducing its size by @head bytes. | |
397 | * | |
398 | * If @tail is non-zero, the target block, which can be @i or @i+1 | |
399 | * depending on @head, is reduced by @tail bytes and @tail byte block | |
400 | * is inserted after the target block. | |
401 | * | |
402 | * @chunk->map must have enough free slots to accommodate the split. | |
403 | * | |
404 | * CONTEXT: | |
405 | * pcpu_lock. | |
406 | */ | |
407 | static void pcpu_split_block(struct pcpu_chunk *chunk, int i, | |
408 | int head, int tail) | |
409 | { | |
410 | int nr_extra = !!head + !!tail; | |
411 | ||
412 | BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra); | |
413 | ||
414 | /* insert new subblocks */ | |
415 | memmove(&chunk->map[i + nr_extra], &chunk->map[i], | |
416 | sizeof(chunk->map[0]) * (chunk->map_used - i)); | |
417 | chunk->map_used += nr_extra; | |
418 | ||
419 | if (head) { | |
420 | chunk->map[i + 1] = chunk->map[i] - head; | |
421 | chunk->map[i++] = head; | |
422 | } | |
423 | if (tail) { | |
424 | chunk->map[i++] -= tail; | |
425 | chunk->map[i] = tail; | |
426 | } | |
427 | } | |
428 | ||
429 | /** | |
430 | * pcpu_alloc_area - allocate area from a pcpu_chunk | |
431 | * @chunk: chunk of interest | |
432 | * @size: wanted size in bytes | |
433 | * @align: wanted align | |
434 | * | |
435 | * Try to allocate @size bytes area aligned at @align from @chunk. | |
436 | * Note that this function only allocates the offset. It doesn't | |
437 | * populate or map the area. | |
438 | * | |
439 | * @chunk->map must have at least two free slots. | |
440 | * | |
441 | * CONTEXT: | |
442 | * pcpu_lock. | |
443 | * | |
444 | * RETURNS: | |
445 | * Allocated offset in @chunk on success, -1 if no matching area is | |
446 | * found. | |
447 | */ | |
448 | static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) | |
449 | { | |
450 | int oslot = pcpu_chunk_slot(chunk); | |
451 | int max_contig = 0; | |
452 | int i, off; | |
453 | ||
454 | for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) { | |
455 | bool is_last = i + 1 == chunk->map_used; | |
456 | int head, tail; | |
457 | ||
458 | /* extra for alignment requirement */ | |
459 | head = ALIGN(off, align) - off; | |
460 | BUG_ON(i == 0 && head != 0); | |
461 | ||
462 | if (chunk->map[i] < 0) | |
463 | continue; | |
464 | if (chunk->map[i] < head + size) { | |
465 | max_contig = max(chunk->map[i], max_contig); | |
466 | continue; | |
467 | } | |
468 | ||
469 | /* | |
470 | * If head is small or the previous block is free, | |
471 | * merge'em. Note that 'small' is defined as smaller | |
472 | * than sizeof(int), which is very small but isn't too | |
473 | * uncommon for percpu allocations. | |
474 | */ | |
475 | if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) { | |
476 | if (chunk->map[i - 1] > 0) | |
477 | chunk->map[i - 1] += head; | |
478 | else { | |
479 | chunk->map[i - 1] -= head; | |
480 | chunk->free_size -= head; | |
481 | } | |
482 | chunk->map[i] -= head; | |
483 | off += head; | |
484 | head = 0; | |
485 | } | |
486 | ||
487 | /* if tail is small, just keep it around */ | |
488 | tail = chunk->map[i] - head - size; | |
489 | if (tail < sizeof(int)) | |
490 | tail = 0; | |
491 | ||
492 | /* split if warranted */ | |
493 | if (head || tail) { | |
494 | pcpu_split_block(chunk, i, head, tail); | |
495 | if (head) { | |
496 | i++; | |
497 | off += head; | |
498 | max_contig = max(chunk->map[i - 1], max_contig); | |
499 | } | |
500 | if (tail) | |
501 | max_contig = max(chunk->map[i + 1], max_contig); | |
502 | } | |
503 | ||
504 | /* update hint and mark allocated */ | |
505 | if (is_last) | |
506 | chunk->contig_hint = max_contig; /* fully scanned */ | |
507 | else | |
508 | chunk->contig_hint = max(chunk->contig_hint, | |
509 | max_contig); | |
510 | ||
511 | chunk->free_size -= chunk->map[i]; | |
512 | chunk->map[i] = -chunk->map[i]; | |
513 | ||
514 | pcpu_chunk_relocate(chunk, oslot); | |
515 | return off; | |
516 | } | |
517 | ||
518 | chunk->contig_hint = max_contig; /* fully scanned */ | |
519 | pcpu_chunk_relocate(chunk, oslot); | |
520 | ||
521 | /* tell the upper layer that this chunk has no matching area */ | |
522 | return -1; | |
523 | } | |
524 | ||
525 | /** | |
526 | * pcpu_free_area - free area to a pcpu_chunk | |
527 | * @chunk: chunk of interest | |
528 | * @freeme: offset of area to free | |
529 | * | |
530 | * Free the area at offset @freeme back to @chunk. Note that this function | |
531 | * only modifies the allocation map. It doesn't depopulate or unmap | |
532 | * the area. | |
533 | * | |
534 | * CONTEXT: | |
535 | * pcpu_lock. | |
536 | */ | |
537 | static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) | |
538 | { | |
539 | int oslot = pcpu_chunk_slot(chunk); | |
540 | int i, off; | |
541 | ||
542 | for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) | |
543 | if (off == freeme) | |
544 | break; | |
545 | BUG_ON(off != freeme); | |
546 | BUG_ON(chunk->map[i] > 0); | |
547 | ||
548 | chunk->map[i] = -chunk->map[i]; | |
549 | chunk->free_size += chunk->map[i]; | |
550 | ||
551 | /* merge with previous? */ | |
552 | if (i > 0 && chunk->map[i - 1] >= 0) { | |
553 | chunk->map[i - 1] += chunk->map[i]; | |
554 | chunk->map_used--; | |
555 | memmove(&chunk->map[i], &chunk->map[i + 1], | |
556 | (chunk->map_used - i) * sizeof(chunk->map[0])); | |
557 | i--; | |
558 | } | |
559 | /* merge with next? */ | |
560 | if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { | |
561 | chunk->map[i] += chunk->map[i + 1]; | |
562 | chunk->map_used--; | |
563 | memmove(&chunk->map[i + 1], &chunk->map[i + 2], | |
564 | (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); | |
565 | } | |
566 | ||
567 | chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); | |
568 | pcpu_chunk_relocate(chunk, oslot); | |
569 | } | |
570 | ||
571 | /** | |
572 | * pcpu_get_pages_and_bitmap - get temp pages array and bitmap | |
573 | * @chunk: chunk of interest | |
574 | * @bitmapp: output parameter for bitmap | |
575 | * @may_alloc: may allocate the array | |
576 | * | |
577 | * Returns pointer to array of pointers to struct page and bitmap, | |
578 | * both of which can be indexed with pcpu_page_idx(). The returned | |
579 | * array is cleared to zero and *@bitmapp is copied from | |
580 | * @chunk->populated. Note that there is only one array and bitmap | |
581 | * and access exclusion is the caller's responsibility. | |
582 | * | |
583 | * CONTEXT: | |
584 | * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc. | |
585 | * Otherwise, don't care. | |
586 | * | |
587 | * RETURNS: | |
588 | * Pointer to temp pages array on success, NULL on failure. | |
589 | */ | |
590 | static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk, | |
591 | unsigned long **bitmapp, | |
592 | bool may_alloc) | |
593 | { | |
594 | static struct page **pages; | |
595 | static unsigned long *bitmap; | |
596 | size_t pages_size = num_possible_cpus() * pcpu_unit_pages * | |
597 | sizeof(pages[0]); | |
598 | size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) * | |
599 | sizeof(unsigned long); | |
600 | ||
601 | if (!pages || !bitmap) { | |
602 | if (may_alloc && !pages) | |
603 | pages = pcpu_mem_alloc(pages_size); | |
604 | if (may_alloc && !bitmap) | |
605 | bitmap = pcpu_mem_alloc(bitmap_size); | |
606 | if (!pages || !bitmap) | |
607 | return NULL; | |
608 | } | |
609 | ||
610 | memset(pages, 0, pages_size); | |
611 | bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages); | |
612 | ||
613 | *bitmapp = bitmap; | |
614 | return pages; | |
615 | } | |
616 | ||
617 | /** | |
618 | * pcpu_free_pages - free pages which were allocated for @chunk | |
619 | * @chunk: chunk pages were allocated for | |
620 | * @pages: array of pages to be freed, indexed by pcpu_page_idx() | |
621 | * @populated: populated bitmap | |
622 | * @page_start: page index of the first page to be freed | |
623 | * @page_end: page index of the last page to be freed + 1 | |
624 | * | |
625 | * Free pages [@page_start, @page_end) in @pages for all units. | |
626 | * The pages were allocated for @chunk. | |
627 | */ | |
628 | static void pcpu_free_pages(struct pcpu_chunk *chunk, | |
629 | struct page **pages, unsigned long *populated, | |
630 | int page_start, int page_end) | |
631 | { | |
632 | unsigned int cpu; | |
633 | int i; | |
634 | ||
635 | for_each_possible_cpu(cpu) { | |
636 | for (i = page_start; i < page_end; i++) { | |
637 | struct page *page = pages[pcpu_page_idx(cpu, i)]; | |
638 | ||
639 | if (page) | |
640 | __free_page(page); | |
641 | } | |
642 | } | |
643 | } | |
644 | ||
645 | /** | |
646 | * pcpu_alloc_pages - allocates pages for @chunk | |
647 | * @chunk: target chunk | |
648 | * @pages: array to put the allocated pages into, indexed by pcpu_page_idx() | |
649 | * @populated: populated bitmap | |
650 | * @page_start: page index of the first page to be allocated | |
651 | * @page_end: page index of the last page to be allocated + 1 | |
652 | * | |
653 | * Allocate pages [@page_start,@page_end) into @pages for all units. | |
654 | * The allocation is for @chunk. Percpu core doesn't care about the | |
655 | * content of @pages and will pass it verbatim to pcpu_map_pages(). | |
656 | */ | |
657 | static int pcpu_alloc_pages(struct pcpu_chunk *chunk, | |
658 | struct page **pages, unsigned long *populated, | |
659 | int page_start, int page_end) | |
660 | { | |
661 | const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; | |
662 | unsigned int cpu; | |
663 | int i; | |
664 | ||
665 | for_each_possible_cpu(cpu) { | |
666 | for (i = page_start; i < page_end; i++) { | |
667 | struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; | |
668 | ||
669 | *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); | |
670 | if (!*pagep) { | |
671 | pcpu_free_pages(chunk, pages, populated, | |
672 | page_start, page_end); | |
673 | return -ENOMEM; | |
674 | } | |
675 | } | |
676 | } | |
677 | return 0; | |
678 | } | |
679 | ||
680 | /** | |
681 | * pcpu_pre_unmap_flush - flush cache prior to unmapping | |
682 | * @chunk: chunk the regions to be flushed belong to | |
683 | * @page_start: page index of the first page to be flushed | |
684 | * @page_end: page index of the last page to be flushed + 1 | |
685 | * | |
686 | * Pages in [@page_start,@page_end) of @chunk are about to be | |
687 | * unmapped. Flush cache. As each flushing trial can be very | |
688 | * expensive, issue flush on the whole region at once rather than | |
689 | * doing it for each cpu. This could be overkill but is more | |
690 | * scalable. | |
691 | */ | |
692 | static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk, | |
693 | int page_start, int page_end) | |
694 | { | |
695 | unsigned int last = num_possible_cpus() - 1; | |
696 | ||
697 | flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start), | |
698 | pcpu_chunk_addr(chunk, last, page_end)); | |
699 | } | |
700 | ||
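| /* unmap @nr_pages pages starting at @addr; callers do the cache/TLB flushing */ | |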
701 | static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) | |
702 | { | |
703 | unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT); | |
704 | } | |
705 | ||
706 | /** | |
707 | * pcpu_unmap_pages - unmap pages out of a pcpu_chunk | |
708 | * @chunk: chunk of interest | |
709 | * @pages: pages array which can be used to pass information to free | |
710 | * @populated: populated bitmap | |
711 | * @page_start: page index of the first page to unmap | |
712 | * @page_end: page index of the last page to unmap + 1 | |
713 | * | |
714 | * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. | |
715 | * Corresponding elements in @pages were cleared by the caller and can | |
716 | * be used to carry information to pcpu_free_pages() which will be | |
717 | * called after all unmaps are finished. The caller should call | |
718 | * proper pre/post flush functions. | |
719 | */ | |
720 | static void pcpu_unmap_pages(struct pcpu_chunk *chunk, | |
721 | struct page **pages, unsigned long *populated, | |
722 | int page_start, int page_end) | |
723 | { | |
724 | unsigned int cpu; | |
725 | int i; | |
726 | ||
727 | for_each_possible_cpu(cpu) { | |
728 | for (i = page_start; i < page_end; i++) { | |
729 | struct page *page; | |
730 | ||
731 | page = pcpu_chunk_page(chunk, cpu, i); | |
732 | WARN_ON(!page); | |
733 | pages[pcpu_page_idx(cpu, i)] = page; | |
734 | } | |
735 | __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start), | |
736 | page_end - page_start); | |
737 | } | |
738 | ||
739 | for (i = page_start; i < page_end; i++) | |
740 | __clear_bit(i, populated); | |
741 | } | |
742 | ||
743 | /** | |
744 | * pcpu_post_unmap_tlb_flush - flush TLB after unmapping | |
745 | * @chunk: pcpu_chunk the regions to be flushed belong to | |
746 | * @page_start: page index of the first page to be flushed | |
747 | * @page_end: page index of the last page to be flushed + 1 | |
748 | * | |
749 | * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush | |
750 | * TLB for the regions. This can be skipped if the area is to be | |
751 | * returned to vmalloc as vmalloc will handle TLB flushing lazily. | |
752 | * | |
753 | * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once | |
754 | * for the whole region. | |
755 | */ | |
756 | static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk, | |
757 | int page_start, int page_end) | |
758 | { | |
759 | unsigned int last = num_possible_cpus() - 1; | |
760 | ||
761 | flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start), | |
762 | pcpu_chunk_addr(chunk, last, page_end)); | |
763 | } | |
764 | ||
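| /* map @nr_pages pages at @addr with PAGE_KERNEL protections; no flushing here */ | |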
765 | static int __pcpu_map_pages(unsigned long addr, struct page **pages, | |
766 | int nr_pages) | |
767 | { | |
768 | return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT, | |
769 | PAGE_KERNEL, pages); | |
770 | } | |
771 | ||
772 | /** | |
773 | * pcpu_map_pages - map pages into a pcpu_chunk | |
774 | * @chunk: chunk of interest | |
775 | * @pages: pages array containing pages to be mapped | |
776 | * @populated: populated bitmap | |
777 | * @page_start: page index of the first page to map | |
778 | * @page_end: page index of the last page to map + 1 | |
779 | * | |
780 | * For each cpu, map pages [@page_start,@page_end) into @chunk. The | |
781 | * caller is responsible for calling pcpu_post_map_flush() after all | |
782 | * mappings are complete. | |
783 | * | |
784 | * This function is responsible for setting corresponding bits in | |
785 | * @chunk->populated bitmap and whatever is necessary for reverse | |
786 | * lookup (addr -> chunk). | |
787 | */ | |
788 | static int pcpu_map_pages(struct pcpu_chunk *chunk, | |
789 | struct page **pages, unsigned long *populated, | |
790 | int page_start, int page_end) | |
791 | { | |
792 | unsigned int cpu, tcpu; | |
793 | int i, err; | |
794 | ||
795 | for_each_possible_cpu(cpu) { | |
796 | err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start), | |
797 | &pages[pcpu_page_idx(cpu, page_start)], | |
798 | page_end - page_start); | |
799 | if (err < 0) | |
800 | goto err; | |
801 | } | |
802 | ||
803 | /* mapping successful, link chunk and mark populated */ | |
804 | for (i = page_start; i < page_end; i++) { | |
805 | for_each_possible_cpu(cpu) | |
806 | pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)], | |
807 | chunk); | |
808 | __set_bit(i, populated); | |
809 | } | |
810 | ||
811 | return 0; | |
812 | ||
813 | err: | |
814 | for_each_possible_cpu(tcpu) { | |
815 | if (tcpu == cpu) | |
816 | break; | |
817 | __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), | |
818 | page_end - page_start); | |
819 | } | |
820 | return err; | |
821 | } | |
822 | ||
823 | /** | |
824 | * pcpu_post_map_flush - flush cache after mapping | |
825 | * @chunk: pcpu_chunk the regions to be flushed belong to | |
826 | * @page_start: page index of the first page to be flushed | |
827 | * @page_end: page index of the last page to be flushed + 1 | |
828 | * | |
829 | * Pages [@page_start,@page_end) of @chunk have been mapped. Flush | |
830 | * cache. | |
831 | * | |
832 | * As with pcpu_pre_unmap_flush(), cache flushing is also done at once | |
833 | * for the whole region. | |
834 | */ | |
835 | static void pcpu_post_map_flush(struct pcpu_chunk *chunk, | |
836 | int page_start, int page_end) | |
837 | { | |
838 | unsigned int last = num_possible_cpus() - 1; | |
839 | ||
840 | /* flush at once, please read comments in pcpu_pre_unmap_flush() */ | |
841 | flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start), | |
842 | pcpu_chunk_addr(chunk, last, page_end)); | |
843 | } | |
844 | ||
845 | /** | |
846 | * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk | |
847 | * @chunk: chunk to depopulate | |
848 | * @off: offset to the area to depopulate | |
849 | * @size: size of the area to depopulate in bytes | |
850 | * | |
851 | * For each cpu, depopulate and unmap pages [@page_start,@page_end) | |
852 | * from @chunk. The cache is flushed before unmapping; the TLB is | |
853 | * not flushed here as, per the comment in the body, vmalloc will | |
854 | * handle it lazily. | |
855 | * | |
856 | * CONTEXT: | |
857 | * pcpu_alloc_mutex. | |
858 | */ | |
859 | static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size) | |
860 | { | |
861 | int page_start = PFN_DOWN(off); | |
862 | int page_end = PFN_UP(off + size); | |
863 | struct page **pages; | |
864 | unsigned long *populated; | |
865 | int rs, re; | |
866 | ||
867 | /* quick path, check whether it's empty already */ | |
868 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { | |
869 | if (rs == page_start && re == page_end) | |
870 | return; | |
871 | break; | |
872 | } | |
873 | ||
874 | /* immutable chunks can't be depopulated */ | |
875 | WARN_ON(chunk->immutable); | |
876 | ||
877 | /* | |
878 | * If control reaches here, there must have been at least one | |
879 | * successful population attempt so the temp pages array must | |
880 | * be available now. | |
881 | */ | |
882 | pages = pcpu_get_pages_and_bitmap(chunk, &populated, false); | |
883 | BUG_ON(!pages); | |
884 | ||
885 | /* unmap and free */ | |
886 | pcpu_pre_unmap_flush(chunk, page_start, page_end); | |
887 | ||
888 | pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) | |
889 | pcpu_unmap_pages(chunk, pages, populated, rs, re); | |
890 | ||
891 | /* no need to flush tlb, vmalloc will handle it lazily */ | |
892 | ||
893 | pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) | |
894 | pcpu_free_pages(chunk, pages, populated, rs, re); | |
895 | ||
896 | /* commit new bitmap */ | |
897 | bitmap_copy(chunk->populated, populated, pcpu_unit_pages); | |
898 | } | |
899 | ||
900 | /** | |
901 | * pcpu_populate_chunk - populate and map an area of a pcpu_chunk | |
902 | * @chunk: chunk of interest | |
903 | * @off: offset to the area to populate | |
904 | * @size: size of the area to populate in bytes | |
905 | * | |
906 | * For each cpu, populate and map pages [@page_start,@page_end) into | |
907 | * @chunk. The area is cleared on return. | |
908 | * | |
909 | * CONTEXT: | |
910 | * pcpu_alloc_mutex, does GFP_KERNEL allocation. | |
911 | */ | |
912 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) | |
913 | { | |
914 | int page_start = PFN_DOWN(off); | |
915 | int page_end = PFN_UP(off + size); | |
916 | int free_end = page_start, unmap_end = page_start; | |
917 | struct page **pages; | |
918 | unsigned long *populated; | |
919 | unsigned int cpu; | |
920 | int rs, re, rc; | |
921 | ||
922 | /* quick path, check whether all pages are already there */ | |
923 | pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) { | |
924 | if (rs == page_start && re == page_end) | |
925 | goto clear; | |
926 | break; | |
927 | } | |
928 | ||
929 | /* need to allocate and map pages, this chunk can't be immutable */ | |
930 | WARN_ON(chunk->immutable); | |
931 | ||
932 | pages = pcpu_get_pages_and_bitmap(chunk, &populated, true); | |
933 | if (!pages) | |
934 | return -ENOMEM; | |
935 | ||
936 | /* alloc and map */ | |
937 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { | |
938 | rc = pcpu_alloc_pages(chunk, pages, populated, rs, re); | |
939 | if (rc) | |
940 | goto err_free; | |
941 | free_end = re; | |
942 | } | |
943 | ||
944 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) { | |
945 | rc = pcpu_map_pages(chunk, pages, populated, rs, re); | |
946 | if (rc) | |
947 | goto err_unmap; | |
948 | unmap_end = re; | |
949 | } | |
950 | pcpu_post_map_flush(chunk, page_start, page_end); | |
951 | ||
952 | /* commit new bitmap */ | |
953 | bitmap_copy(chunk->populated, populated, pcpu_unit_pages); | |
954 | clear: | |
955 | for_each_possible_cpu(cpu) | |
956 | memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0, | |
957 | size); | |
958 | return 0; | |
959 | ||
960 | err_unmap: | |
961 | pcpu_pre_unmap_flush(chunk, page_start, unmap_end); | |
962 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end) | |
963 | pcpu_unmap_pages(chunk, pages, populated, rs, re); | |
964 | pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end); | |
965 | err_free: | |
966 | pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end) | |
967 | pcpu_free_pages(chunk, pages, populated, rs, re); | |
968 | return rc; | |
969 | } | |
970 | ||
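| /* release a chunk's vm area, its allocation map and the chunk struct itself */ | |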
971 | static void free_pcpu_chunk(struct pcpu_chunk *chunk) | |
972 | { | |
973 | if (!chunk) | |
974 | return; | |
975 | if (chunk->vm) | |
976 | free_vm_area(chunk->vm); | |
977 | pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); | |
978 | kfree(chunk); | |
979 | } | |
980 | ||
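| /* allocate and initialize a new fully free chunk backed by a fresh vm area */ | |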
981 | static struct pcpu_chunk *alloc_pcpu_chunk(void) | |
982 | { | |
983 | struct pcpu_chunk *chunk; | |
984 | ||
985 | chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL); | |
986 | if (!chunk) | |
987 | return NULL; | |
988 | ||
989 | chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); | |
| if (!chunk->map) { | |
| kfree(chunk); | |
| return NULL; | |
| } | |
990 | chunk->map_alloc = PCPU_DFL_MAP_ALLOC; | |
991 | chunk->map[chunk->map_used++] = pcpu_unit_size; | |
992 | ||
993 | chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL); | |
994 | if (!chunk->vm) { | |
995 | free_pcpu_chunk(chunk); | |
996 | return NULL; | |
997 | } | |
998 | ||
999 | INIT_LIST_HEAD(&chunk->list); | |
1000 | chunk->free_size = pcpu_unit_size; | |
1001 | chunk->contig_hint = pcpu_unit_size; | |
1002 | ||
1003 | return chunk; | |
1004 | } | |
1005 | ||
1006 | /** | |
1007 | * pcpu_alloc - the percpu allocator | |
1008 | * @size: size of area to allocate in bytes | |
1009 | * @align: alignment of area (max PAGE_SIZE) | |
1010 | * @reserved: allocate from the reserved chunk if available | |
1011 | * | |
1012 | * Allocate percpu area of @size bytes aligned at @align. | |
1013 | * | |
1014 | * CONTEXT: | |
1015 | * Does GFP_KERNEL allocation. | |
1016 | * | |
1017 | * RETURNS: | |
1018 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1019 | */ | |
1020 | static void *pcpu_alloc(size_t size, size_t align, bool reserved) | |
1021 | { | |
1022 | struct pcpu_chunk *chunk; | |
1023 | int slot, off; | |
1024 | ||
1025 | if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { | |
1026 | WARN(true, "illegal size (%zu) or align (%zu) for " | |
1027 | "percpu allocation\n", size, align); | |
1028 | return NULL; | |
1029 | } | |
1030 | ||
1031 | mutex_lock(&pcpu_alloc_mutex); | |
1032 | spin_lock_irq(&pcpu_lock); | |
1033 | ||
1034 | /* serve reserved allocations from the reserved chunk if available */ | |
1035 | if (reserved && pcpu_reserved_chunk) { | |
1036 | chunk = pcpu_reserved_chunk; | |
1037 | if (size > chunk->contig_hint || | |
1038 | pcpu_extend_area_map(chunk) < 0) | |
1039 | goto fail_unlock; | |
1040 | off = pcpu_alloc_area(chunk, size, align); | |
1041 | if (off >= 0) | |
1042 | goto area_found; | |
1043 | goto fail_unlock; | |
1044 | } | |
1045 | ||
1046 | restart: | |
1047 | /* search through normal chunks */ | |
1048 | for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { | |
1049 | list_for_each_entry(chunk, &pcpu_slot[slot], list) { | |
1050 | if (size > chunk->contig_hint) | |
1051 | continue; | |
1052 | ||
1053 | switch (pcpu_extend_area_map(chunk)) { | |
1054 | case 0: | |
1055 | break; | |
1056 | case 1: | |
1057 | goto restart; /* pcpu_lock dropped, restart */ | |
1058 | default: | |
1059 | goto fail_unlock; | |
1060 | } | |
1061 | ||
1062 | off = pcpu_alloc_area(chunk, size, align); | |
1063 | if (off >= 0) | |
1064 | goto area_found; | |
1065 | } | |
1066 | } | |
1067 | ||
1068 | /* hmmm... no space left, create a new chunk */ | |
1069 | spin_unlock_irq(&pcpu_lock); | |
1070 | ||
1071 | chunk = alloc_pcpu_chunk(); | |
1072 | if (!chunk) | |
1073 | goto fail_unlock_mutex; | |
1074 | ||
1075 | spin_lock_irq(&pcpu_lock); | |
1076 | pcpu_chunk_relocate(chunk, -1); | |
1077 | goto restart; | |
1078 | ||
1079 | area_found: | |
1080 | spin_unlock_irq(&pcpu_lock); | |
1081 | ||
1082 | /* populate, map and clear the area */ | |
1083 | if (pcpu_populate_chunk(chunk, off, size)) { | |
1084 | spin_lock_irq(&pcpu_lock); | |
1085 | pcpu_free_area(chunk, off); | |
1086 | goto fail_unlock; | |
1087 | } | |
1088 | ||
1089 | mutex_unlock(&pcpu_alloc_mutex); | |
1090 | ||
1091 | return __addr_to_pcpu_ptr(chunk->vm->addr + off); | |
1092 | ||
1093 | fail_unlock: | |
1094 | spin_unlock_irq(&pcpu_lock); | |
1095 | fail_unlock_mutex: | |
1096 | mutex_unlock(&pcpu_alloc_mutex); | |
1097 | return NULL; | |
1098 | } | |
1099 | ||
1100 | /** | |
1101 | * __alloc_percpu - allocate dynamic percpu area | |
1102 | * @size: size of area to allocate in bytes | |
1103 | * @align: alignment of area (max PAGE_SIZE) | |
1104 | * | |
1105 | * Allocate percpu area of @size bytes aligned at @align. Might | |
1106 | * sleep. Might trigger writeouts. | |
1107 | * | |
1108 | * CONTEXT: | |
1109 | * Does GFP_KERNEL allocation. | |
1110 | * | |
1111 | * RETURNS: | |
1112 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1113 | */ | |
1114 | void *__alloc_percpu(size_t size, size_t align) | |
1115 | { | |
1116 | return pcpu_alloc(size, align, false); | |
1117 | } | |
1118 | EXPORT_SYMBOL_GPL(__alloc_percpu); | |
1119 | ||
1120 | /** | |
1121 | * __alloc_reserved_percpu - allocate reserved percpu area | |
1122 | * @size: size of area to allocate in bytes | |
1123 | * @align: alignment of area (max PAGE_SIZE) | |
1124 | * | |
1125 | * Allocate percpu area of @size bytes aligned at @align from reserved | |
1126 | * percpu area if arch has set it up; otherwise, allocation is served | |
1127 | * from the same dynamic area. Might sleep. Might trigger writeouts. | |
1128 | * | |
1129 | * CONTEXT: | |
1130 | * Does GFP_KERNEL allocation. | |
1131 | * | |
1132 | * RETURNS: | |
1133 | * Percpu pointer to the allocated area on success, NULL on failure. | |
1134 | */ | |
1135 | void *__alloc_reserved_percpu(size_t size, size_t align) | |
1136 | { | |
1137 | return pcpu_alloc(size, align, true); | |
1138 | } | |
1139 | ||
1140 | /** | |
1141 | * pcpu_reclaim - reclaim fully free chunks, workqueue function | |
1142 | * @work: unused | |
1143 | * | |
1144 | * Reclaim all fully free chunks except for the first one. | |
1145 | * | |
1146 | * CONTEXT: | |
1147 | * workqueue context. | |
1148 | */ | |
1149 | static void pcpu_reclaim(struct work_struct *work) | |
1150 | { | |
1151 | LIST_HEAD(todo); | |
1152 | struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; | |
1153 | struct pcpu_chunk *chunk, *next; | |
1154 | ||
1155 | mutex_lock(&pcpu_alloc_mutex); | |
1156 | spin_lock_irq(&pcpu_lock); | |
1157 | ||
1158 | list_for_each_entry_safe(chunk, next, head, list) { | |
1159 | WARN_ON(chunk->immutable); | |
1160 | ||
1161 | /* spare the first one */ | |
1162 | if (chunk == list_first_entry(head, struct pcpu_chunk, list)) | |
1163 | continue; | |
1164 | ||
1165 | list_move(&chunk->list, &todo); | |
1166 | } | |
1167 | ||
1168 | spin_unlock_irq(&pcpu_lock); | |
1169 | mutex_unlock(&pcpu_alloc_mutex); | |
1170 | ||
1171 | list_for_each_entry_safe(chunk, next, &todo, list) { | |
1172 | pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); | |
1173 | free_pcpu_chunk(chunk); | |
1174 | } | |
1175 | } | |
1176 | ||
1177 | /** | |
1178 | * free_percpu - free percpu area | |
1179 | * @ptr: pointer to area to free | |
1180 | * | |
1181 | * Free percpu area @ptr. | |
1182 | * | |
1183 | * CONTEXT: | |
1184 | * Can be called from atomic context. | |
1185 | */ | |
1186 | void free_percpu(void *ptr) | |
1187 | { | |
1188 | void *addr = __pcpu_ptr_to_addr(ptr); | |
1189 | struct pcpu_chunk *chunk; | |
1190 | unsigned long flags; | |
1191 | int off; | |
1192 | ||
1193 | if (!ptr) | |
1194 | return; | |
1195 | ||
1196 | spin_lock_irqsave(&pcpu_lock, flags); | |
1197 | ||
1198 | chunk = pcpu_chunk_addr_search(addr); | |
1199 | off = addr - chunk->vm->addr; | |
1200 | ||
1201 | pcpu_free_area(chunk, off); | |
1202 | ||
1203 | /* if there is more than one fully free chunk, wake up the grim reaper */ | |
1204 | if (chunk->free_size == pcpu_unit_size) { | |
1205 | struct pcpu_chunk *pos; | |
1206 | ||
1207 | list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) | |
1208 | if (pos != chunk) { | |
1209 | schedule_work(&pcpu_reclaim_work); | |
1210 | break; | |
1211 | } | |
1212 | } | |
1213 | ||
1214 | spin_unlock_irqrestore(&pcpu_lock, flags); | |
1215 | } | |
1216 | EXPORT_SYMBOL_GPL(free_percpu); | |
1217 | ||
1218 | /** | |
1219 | * pcpu_setup_first_chunk - initialize the first percpu chunk | |
1220 | * @static_size: the size of static percpu area in bytes | |
1221 | * @reserved_size: the size of reserved percpu area in bytes, 0 for none | |
1222 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto | |
1223 | * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE | |
1224 | * @base_addr: mapped address | |
1225 | * | |
1226 | * Initialize the first percpu chunk which contains the kernel static | |
1227 | * percpu area. This function is to be called from arch percpu area | |
1228 | * setup path. | |
1229 | * | |
1230 | * @reserved_size, if non-zero, specifies the amount of bytes to | |
1231 | * reserve after the static area in the first chunk. This reserves | |
1232 | * the first chunk such that it's available only through reserved | |
1233 | * percpu allocation. This is primarily used to serve module percpu | |
1234 | * static areas on architectures where the addressing model has | |
1235 | * limited offset range for symbol relocations to guarantee module | |
1236 | * percpu symbols fall inside the relocatable range. | |
1237 | * | |
1238 | * @dyn_size, if non-negative, determines the number of bytes | |
1239 | * available for dynamic allocation in the first chunk. Specifying | |
1240 | * non-negative value makes percpu leave alone the area beyond | |
1241 | * @static_size + @reserved_size + @dyn_size. | |
1242 | * | |
1243 | * @unit_size specifies unit size and must be aligned to PAGE_SIZE and | |
1244 | * equal to or larger than @static_size + @reserved_size + @dyn_size | |
1245 | * (when @dyn_size is non-negative). | |
1246 | * | |
1247 | * The caller should have mapped the first chunk at @base_addr and | |
1248 | * copied static data to each unit. | |
1249 | * | |
1250 | * If the first chunk ends up with both reserved and dynamic areas, it | |
1251 | * is served by two chunks - one to serve the core static and reserved | |
1252 | * areas and the other for the dynamic area. They share the same vm | |
1253 | * and page map but use different area allocation maps to stay away | |
1254 | * from each other. The latter chunk is circulated in the chunk slots | |
1255 | * and available for dynamic allocation like any other chunks. | |
1256 | * | |
1257 | * RETURNS: | |
1258 | * The determined pcpu_unit_size which can be used to initialize | |
1259 | * percpu access. | |
1260 | */ | |
1261 | size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, | |
1262 | ssize_t dyn_size, size_t unit_size, | |
1263 | void *base_addr) | |
1264 | { | |
1265 | static struct vm_struct first_vm; | |
1266 | static int smap[2], dmap[2]; | |
1267 | size_t size_sum = static_size + reserved_size + | |
1268 | (dyn_size >= 0 ? dyn_size : 0); | |
1269 | struct pcpu_chunk *schunk, *dchunk = NULL; | |
1270 | int i; | |
1271 | ||
1272 | /* sanity checks */ | |
1273 | BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC || | |
1274 | ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC); | |
1275 | BUG_ON(!static_size); | |
1276 | BUG_ON(!base_addr); | |
1277 | BUG_ON(unit_size < size_sum); | |
1278 | BUG_ON(unit_size & ~PAGE_MASK); | |
1279 | BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE); | |
1280 | ||
1281 | pcpu_unit_pages = unit_size >> PAGE_SHIFT; | |
1282 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; | |
1283 | pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size; | |
1284 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + | |
1285 | BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); | |
1286 | ||
1287 | if (dyn_size < 0) | |
1288 | dyn_size = pcpu_unit_size - static_size - reserved_size; | |
1289 | ||
1290 | first_vm.flags = VM_ALLOC; | |
1291 | first_vm.size = pcpu_chunk_size; | |
1292 | first_vm.addr = base_addr; | |
1293 | ||
1294 | /* | |
1295 | * Allocate chunk slots. The additional last slot is for | |
1296 | * empty chunks. | |
1297 | */ | |
1298 | pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; | |
1299 | pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); | |
1300 | for (i = 0; i < pcpu_nr_slots; i++) | |
1301 | INIT_LIST_HEAD(&pcpu_slot[i]); | |
1302 | ||
1303 | /* | |
1304 | * Initialize static chunk. If reserved_size is zero, the | |
1305 | * static chunk covers static area + dynamic allocation area | |
1306 | * in the first chunk. If reserved_size is not zero, it | |
1307 | * covers static area + reserved area (mostly used for module | |
1308 | * static percpu allocation). | |
1309 | */ | |
1310 | schunk = alloc_bootmem(pcpu_chunk_struct_size); | |
1311 | INIT_LIST_HEAD(&schunk->list); | |
1312 | schunk->vm = &first_vm; | |
1313 | schunk->map = smap; | |
1314 | schunk->map_alloc = ARRAY_SIZE(smap); | |
1315 | schunk->immutable = true; | |
1316 | bitmap_fill(schunk->populated, pcpu_unit_pages); | |
1317 | ||
1318 | if (reserved_size) { | |
1319 | schunk->free_size = reserved_size; | |
1320 | pcpu_reserved_chunk = schunk; | |
1321 | pcpu_reserved_chunk_limit = static_size + reserved_size; | |
1322 | } else { | |
1323 | schunk->free_size = dyn_size; | |
1324 | dyn_size = 0; /* dynamic area covered */ | |
1325 | } | |
1326 | schunk->contig_hint = schunk->free_size; | |
1327 | ||
1328 | schunk->map[schunk->map_used++] = -static_size; | |
1329 | if (schunk->free_size) | |
1330 | schunk->map[schunk->map_used++] = schunk->free_size; | |
1331 | ||
1332 | /* init dynamic chunk if necessary */ | |
1333 | if (dyn_size) { | |
1334 | dchunk = alloc_bootmem(pcpu_chunk_struct_size); | |
1335 | INIT_LIST_HEAD(&dchunk->list); | |
1336 | dchunk->vm = &first_vm; | |
1337 | dchunk->map = dmap; | |
1338 | dchunk->map_alloc = ARRAY_SIZE(dmap); | |
1339 | dchunk->immutable = true; | |
1340 | bitmap_fill(dchunk->populated, pcpu_unit_pages); | |
1341 | ||
1342 | dchunk->contig_hint = dchunk->free_size = dyn_size; | |
1343 | dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit; | |
1344 | dchunk->map[dchunk->map_used++] = dchunk->free_size; | |
1345 | } | |
1346 | ||
1347 | /* link the first chunk in */ | |
1348 | pcpu_first_chunk = dchunk ?: schunk; | |
1349 | pcpu_chunk_relocate(pcpu_first_chunk, -1); | |
1350 | ||
1351 | /* we're done */ | |
1352 | pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0); | |
1353 | return pcpu_unit_size; | |
1354 | } | |
1355 | ||
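| /* page-align the first chunk size; unless *@dyn_sizep is 0, the dynamic area absorbs the alignment padding */ | |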
1356 | static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size, | |
1357 | ssize_t *dyn_sizep) | |
1358 | { | |
1359 | size_t size_sum; | |
1360 | ||
1361 | size_sum = PFN_ALIGN(static_size + reserved_size + | |
1362 | (*dyn_sizep >= 0 ? *dyn_sizep : 0)); | |
1363 | if (*dyn_sizep != 0) | |
1364 | *dyn_sizep = size_sum - static_size - reserved_size; | |
1365 | ||
1366 | return size_sum; | |
1367 | } | |
1368 | ||
1369 | /** | |
1370 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | |
1371 | * @static_size: the size of static percpu area in bytes | |
1372 | * @reserved_size: the size of reserved percpu area in bytes | |
1373 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto | |
1374 | * | |
1375 | * This is a helper to ease setting up embedded first percpu chunk and | |
1376 | * can be called where pcpu_setup_first_chunk() is expected. | |
1377 | * | |
1378 | * If this function is used to setup the first chunk, it is allocated | |
1379 | * as a contiguous area using bootmem allocator and used as-is without | |
1380 | * being mapped into vmalloc area. This enables the first chunk to | |
1381 | * piggy back on the linear physical mapping which often uses larger | |
1382 | * page size. | |
1383 | * | |
1384 | * When @dyn_size is positive, dynamic area might be larger than | |
1385 | * specified to fill page alignment. When @dyn_size is auto, | |
1386 | * @dyn_size is just big enough to fill page alignment after static | |
1387 | * and reserved areas. | |
1388 | * | |
1389 | * If the needed size is smaller than the minimum or specified unit | |
1390 | * size, the leftover is returned to the bootmem allocator. | |
1391 | * | |
1392 | * RETURNS: | |
1393 | * The determined pcpu_unit_size which can be used to initialize | |
1394 | * percpu access on success, -errno on failure. | |
1395 | */ | |
1396 | ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size, | |
1397 | ssize_t dyn_size) | |
1398 | { | |
1399 | size_t size_sum, unit_size, chunk_size; | |
1400 | void *base; | |
1401 | unsigned int cpu; | |
1402 | ||
1403 | /* determine parameters and allocate */ | |
1404 | size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); | |
1405 | ||
1406 | unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | |
1407 | chunk_size = unit_size * num_possible_cpus(); | |
1408 | ||
1409 | base = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE, | |
1410 | __pa(MAX_DMA_ADDRESS)); | |
1411 | if (!base) { | |
1412 | pr_warning("PERCPU: failed to allocate %zu bytes for " | |
1413 | "embedding\n", chunk_size); | |
1414 | return -ENOMEM; | |
1415 | } | |
1416 | ||
1417 | /* return the leftover and copy */ | |
1418 | for_each_possible_cpu(cpu) { | |
1419 | void *ptr = base + cpu * unit_size; | |
1420 | ||
1421 | free_bootmem(__pa(ptr + size_sum), unit_size - size_sum); | |
1422 | memcpy(ptr, __per_cpu_load, static_size); | |
1423 | } | |
1424 | ||
1425 | /* we're ready, commit */ | |
1426 | pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", | |
1427 | size_sum >> PAGE_SHIFT, base, static_size); | |
1428 | ||
1429 | return pcpu_setup_first_chunk(static_size, reserved_size, dyn_size, | |
1430 | unit_size, base); | |
1431 | } | |
1432 | ||
1433 | /** | |
1434 | * pcpu_4k_first_chunk - map the first chunk using PAGE_SIZE pages | |
1435 | * @static_size: the size of static percpu area in bytes | |
1436 | * @reserved_size: the size of reserved percpu area in bytes | |
1437 | * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE | |
1438 | * @free_fn: function to free percpu page, always called with PAGE_SIZE | |
1439 | * @populate_pte_fn: function to populate pte | |
1440 | * | |
1441 | * This is a helper to ease setting up a page-mapped first percpu chunk and | |
1442 | * can be called where pcpu_setup_first_chunk() is expected. | |
1443 | * | |
1444 | * This is the basic allocator. Static percpu area is allocated | |
1445 | * page-by-page into vmalloc area. | |
1446 | * | |
1447 | * RETURNS: | |
1448 | * The determined pcpu_unit_size which can be used to initialize | |
1449 | * percpu access on success, -errno on failure. | |
1450 | */ | |
1451 | ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size, | |
1452 | pcpu_fc_alloc_fn_t alloc_fn, | |
1453 | pcpu_fc_free_fn_t free_fn, | |
1454 | pcpu_fc_populate_pte_fn_t populate_pte_fn) | |
1455 | { | |
1456 | static struct vm_struct vm; | |
1457 | int unit_pages; | |
1458 | size_t pages_size; | |
1459 | struct page **pages; | |
1460 | unsigned int cpu; | |
1461 | int i, j; | |
1462 | ssize_t ret; | |
1463 | ||
1464 | unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size, | |
1465 | PCPU_MIN_UNIT_SIZE)); | |
1466 | ||
1467 | /* unaligned allocations can't be freed, round up to page size */ | |
1468 | pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * | |
1469 | sizeof(pages[0])); | |
1470 | pages = alloc_bootmem(pages_size); | |
1471 | ||
1472 | /* allocate pages */ | |
1473 | j = 0; | |
1474 | for_each_possible_cpu(cpu) | |
1475 | for (i = 0; i < unit_pages; i++) { | |
1476 | void *ptr; | |
1477 | ||
1478 | ptr = alloc_fn(cpu, PAGE_SIZE); | |
1479 | if (!ptr) { | |
1480 | pr_warning("PERCPU: failed to allocate " | |
1481 | "4k page for cpu%u\n", cpu); | |
1482 | goto enomem; | |
1483 | } | |
1484 | pages[j++] = virt_to_page(ptr); | |
1485 | } | |
1486 | ||
1487 | /* allocate vm area, map the pages and copy static data */ | |
1488 | vm.flags = VM_ALLOC; | |
1489 | vm.size = num_possible_cpus() * unit_pages << PAGE_SHIFT; | |
1490 | vm_area_register_early(&vm, PAGE_SIZE); | |
1491 | ||
1492 | for_each_possible_cpu(cpu) { | |
1493 | unsigned long unit_addr = (unsigned long)vm.addr + | |
1494 | (cpu * unit_pages << PAGE_SHIFT); | |
1495 | ||
1496 | for (i = 0; i < unit_pages; i++) | |
1497 | populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); | |
1498 | ||
1499 | /* pte already populated, the following shouldn't fail */ | |
1500 | ret = __pcpu_map_pages(unit_addr, &pages[cpu * unit_pages], | |
1501 | unit_pages); | |
1502 | if (ret < 0) | |
1503 | panic("failed to map percpu area, err=%zd\n", ret); | |
1504 | ||
1505 | /* | |
1506 | * FIXME: Archs with virtual cache should flush local | |
1507 | * cache for the linear mapping here - something | |
1508 | * equivalent to flush_cache_vmap() on the local cpu. | |
1509 | * flush_cache_vmap() can't be used as most supporting | |
1510 | * data structures are not set up yet. | |
1511 | */ | |
1512 | ||
1513 | /* copy static data */ | |
1514 | memcpy((void *)unit_addr, __per_cpu_load, static_size); | |
1515 | } | |
1516 | ||
1517 | /* we're ready, commit */ | |
1518 | pr_info("PERCPU: %d 4k pages per cpu, static data %zu bytes\n", | |
1519 | unit_pages, static_size); | |
1520 | ||
1521 | ret = pcpu_setup_first_chunk(static_size, reserved_size, -1, | |
1522 | unit_pages << PAGE_SHIFT, vm.addr); | |
1523 | goto out_free_ar; | |
1524 | ||
1525 | enomem: | |
1526 | while (--j >= 0) | |
1527 | free_fn(page_address(pages[j]), PAGE_SIZE); | |
1528 | ret = -ENOMEM; | |
1529 | out_free_ar: | |
1530 | free_bootmem(__pa(pages), pages_size); | |
1531 | return ret; | |
1532 | } | |
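/*
 * Editor's illustration (not part of the original source): the three
 * callbacks taken by pcpu_4k_first_chunk() could, on a simple arch,
 * be backed directly by the bootmem allocator as sketched below.  All
 * pcpu_example_*() names are hypothetical; the populate_pte callback
 * is inherently arch specific and is only stubbed here.
 */
#if 0	/* illustrative sketch only, not compiled */
static void * __init pcpu_example_alloc(unsigned int cpu, size_t size)
{
	/* @size is always PAGE_SIZE; hand out page-aligned bootmem */
	return __alloc_bootmem_nopanic(size, PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_example_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static void __init pcpu_example_populate_pte(unsigned long addr)
{
	/* arch specific: make sure a pte exists for @addr */
}

static ssize_t __init pcpu_example_4k_setup(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;

	return pcpu_4k_first_chunk(static_size, PERCPU_MODULE_RESERVE,
				   pcpu_example_alloc, pcpu_example_free,
				   pcpu_example_populate_pte);
}
#endif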
1533 | ||
1534 | /* | |
1535 | * Large page remapping first chunk setup helper | |
1536 | */ | |
1537 | #ifdef CONFIG_NEED_MULTIPLE_NODES | |
1538 | struct pcpul_ent { | |
1539 | unsigned int cpu; | |
1540 | void *ptr; | |
1541 | }; | |
1542 | ||
1543 | static size_t pcpul_size; | |
1544 | static size_t pcpul_unit_size; | |
1545 | static struct pcpul_ent *pcpul_map; | |
1546 | static struct vm_struct pcpul_vm; | |
1547 | ||
1548 | /** | |
1549 | * pcpu_lpage_first_chunk - remap the first percpu chunk using large page | |
1550 | * @static_size: the size of static percpu area in bytes | |
1551 | * @reserved_size: the size of reserved percpu area in bytes | |
1552 | * @dyn_size: free size for dynamic allocation in bytes, -1 for auto | |
1553 | * @lpage_size: the size of a large page | |
1554 | * @alloc_fn: function to allocate percpu lpage, always called with lpage_size | |
1555 | * @free_fn: function to free percpu memory, @size <= lpage_size | |
1556 | * @map_fn: function to map percpu lpage, always called with lpage_size | |
1557 | * | |
1558 | * This allocator uses a large page as the unit. A large page is | |
1559 | * allocated for each cpu and each one is remapped into the vmalloc | |
1560 | * area using a large page mapping. As a large page can be quite | |
1561 | * large, only part of it is used for the first chunk. The unused | |
1562 | * part is returned to the bootmem allocator. | |
1563 | * | |
1564 | * So, the large pages are mapped twice - once in the physical linear | |
1565 | * mapping and once in the vmalloc area for the first percpu chunk. | |
1566 | * The double mapping costs one extra large TLB entry worth of | |
1567 | * pressure, but that is still much better than using only 4k | |
1568 | * mappings while staying NUMA friendly. | |
1569 | * | |
1570 | * RETURNS: | |
1571 | * The determined pcpu_unit_size which can be used to initialize | |
1572 | * percpu access on success, -errno on failure. | |
1573 | */ | |
1574 | ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size, | |
1575 | ssize_t dyn_size, size_t lpage_size, | |
1576 | pcpu_fc_alloc_fn_t alloc_fn, | |
1577 | pcpu_fc_free_fn_t free_fn, | |
1578 | pcpu_fc_map_fn_t map_fn) | |
1579 | { | |
1580 | size_t size_sum; | |
1581 | size_t map_size; | |
1582 | unsigned int cpu; | |
1583 | int i, j; | |
1584 | ssize_t ret; | |
1585 | ||
1586 | /* | |
1587 | * Currently supports only a single page. Supporting multiple | |
1588 | * pages won't be too difficult if it ever becomes necessary. | |
1589 | */ | |
1590 | size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size); | |
1591 | ||
1592 | pcpul_unit_size = lpage_size; | |
1593 | pcpul_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | |
1594 | if (pcpul_size > pcpul_unit_size) { | |
1595 | pr_warning("PERCPU: percpu data is larger than a large page, " | |
1596 | "can't use large page\n"); | |
1597 | return -EINVAL; | |
1598 | } | |
1599 | ||
1600 | /* allocate pointer array and alloc large pages */ | |
1601 | map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0])); | |
1602 | pcpul_map = alloc_bootmem(map_size); | |
1603 | ||
1604 | for_each_possible_cpu(cpu) { | |
1605 | void *ptr; | |
1606 | ||
1607 | ptr = alloc_fn(cpu, lpage_size); | |
1608 | if (!ptr) { | |
1609 | pr_warning("PERCPU: failed to allocate large page " | |
1610 | "for cpu%u\n", cpu); | |
1611 | goto enomem; | |
1612 | } | |
1613 | ||
1614 | /* | |
1615 | * Only use pcpul_size bytes and give back the rest. | |
1616 | * | |
1617 | * Ingo: Rounding the bootmem allocation up to lpage_size | |
1618 | * is needed to make sure the partial lpage is still fully | |
1619 | * RAM - it's not well-specified to have an incompatible | |
1620 | * area (unmapped RAM, device memory, etc.) in that hole. | |
1621 | */ | |
1622 | free_fn(ptr + pcpul_size, lpage_size - pcpul_size); | |
1623 | ||
1624 | pcpul_map[cpu].cpu = cpu; | |
1625 | pcpul_map[cpu].ptr = ptr; | |
1626 | ||
1627 | memcpy(ptr, __per_cpu_load, static_size); | |
1628 | } | |
1629 | ||
1630 | /* allocate address and map */ | |
1631 | pcpul_vm.flags = VM_ALLOC; | |
1632 | pcpul_vm.size = num_possible_cpus() * pcpul_unit_size; | |
1633 | vm_area_register_early(&pcpul_vm, pcpul_unit_size); | |
1634 | ||
1635 | for_each_possible_cpu(cpu) | |
1636 | map_fn(pcpul_map[cpu].ptr, pcpul_unit_size, | |
1637 | pcpul_vm.addr + cpu * pcpul_unit_size); | |
1638 | ||
1639 | /* we're ready, commit */ | |
1640 | pr_info("PERCPU: Remapped at %p with large pages, static data " | |
1641 | "%zu bytes\n", pcpul_vm.addr, static_size); | |
1642 | ||
1643 | ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size, | |
1644 | pcpul_unit_size, pcpul_vm.addr); | |
1645 | ||
1646 | /* sort pcpul_map array for pcpu_lpage_remapped() */ | |
1647 | for (i = 0; i < num_possible_cpus() - 1; i++) | |
1648 | for (j = i + 1; j < num_possible_cpus(); j++) | |
1649 | if (pcpul_map[i].ptr > pcpul_map[j].ptr) { | |
1650 | struct pcpul_ent tmp = pcpul_map[i]; | |
1651 | pcpul_map[i] = pcpul_map[j]; | |
1652 | pcpul_map[j] = tmp; | |
1653 | } | |
1654 | ||
1655 | return ret; | |
1656 | ||
1657 | enomem: | |
1658 | for_each_possible_cpu(cpu) | |
1659 | if (pcpul_map[cpu].ptr) | |
1660 | free_fn(pcpul_map[cpu].ptr, pcpul_size); | |
1661 | free_bootmem(__pa(pcpul_map), map_size); | |
1662 | return -ENOMEM; | |
1663 | } | |
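/*
 * Editor's illustration (not part of the original source): a NUMA
 * capable arch could try the large page helper first and fall back to
 * embedding when it is not usable, roughly as sketched below.  The
 * pcpu_example_lpage_*() callbacks are hypothetical placeholders for
 * arch specific implementations and PMD_SIZE is only an assumed large
 * page size.
 */
#if 0	/* illustrative sketch only, not compiled */
static ssize_t __init pcpu_example_numa_setup(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t ret;

	ret = pcpu_lpage_first_chunk(static_size, PERCPU_MODULE_RESERVE, -1,
				     PMD_SIZE,
				     pcpu_example_lpage_alloc,
				     pcpu_example_lpage_free,
				     pcpu_example_lpage_map);
	if (ret < 0)	/* e.g. -EINVAL when it doesn't fit a large page */
		ret = pcpu_embed_first_chunk(static_size,
					     PERCPU_MODULE_RESERVE, -1);
	return ret;
}
#endif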
1664 | ||
1665 | /** | |
1666 | * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area | |
1667 | * @kaddr: the kernel address in question | |
1668 | * | |
1669 | * Determine whether @kaddr falls in the pcpul recycled area. This is | |
1670 | * used by pageattr to detect VM aliases and break up the pcpu large | |
1671 | * page mapping such that the same physical page is not mapped under | |
1672 | * different attributes. | |
1673 | * | |
1674 | * The recycled area is always at the tail of a partially used large | |
1675 | * page. | |
1676 | * | |
1677 | * RETURNS: | |
1678 | * Address of corresponding remapped pcpu address if match is found; | |
1679 | * otherwise, NULL. | |
1680 | */ | |
1681 | void *pcpu_lpage_remapped(void *kaddr) | |
1682 | { | |
1683 | unsigned long unit_mask = pcpul_unit_size - 1; | |
1684 | void *lpage_addr = (void *)((unsigned long)kaddr & ~unit_mask); | |
1685 | unsigned long offset = (unsigned long)kaddr & unit_mask; | |
1686 | int left = 0, right = num_possible_cpus() - 1; | |
1687 | int pos; | |
1688 | ||
1689 | /* pcpul in use at all? */ | |
1690 | if (!pcpul_map) | |
1691 | return NULL; | |
1692 | ||
1693 | /* okay, perform binary search */ | |
1694 | while (left <= right) { | |
1695 | pos = (left + right) / 2; | |
1696 | ||
1697 | if (pcpul_map[pos].ptr < lpage_addr) | |
1698 | left = pos + 1; | |
1699 | else if (pcpul_map[pos].ptr > lpage_addr) | |
1700 | right = pos - 1; | |
1701 | else { | |
1702 | /* it shouldn't be in the area for the first chunk */ | |
1703 | WARN_ON(offset < pcpul_size); | |
1704 | ||
1705 | return pcpul_vm.addr + | |
1706 | pcpul_map[pos].cpu * pcpul_unit_size + offset; | |
1707 | } | |
1708 | } | |
1709 | ||
1710 | return NULL; | |
1711 | } | |
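/*
 * Editor's illustration (not part of the original source): a sketch of
 * how arch page attribute code could consult pcpu_lpage_remapped() to
 * catch VM aliases.  example_change_attr_one() stands in for whatever
 * single-address attribute change primitive the arch provides and is a
 * hypothetical name.
 */
#if 0	/* illustrative sketch only, not compiled */
static int example_fixup_pcpu_alias(unsigned long vaddr)
{
	void *alias = pcpu_lpage_remapped((void *)vaddr);

	/*
	 * If @vaddr lies in the recycled tail of a percpu large page,
	 * the same physical page is also mapped in the percpu vmalloc
	 * area; the attribute change must be applied to that alias too
	 * so the two mappings don't disagree.
	 */
	if (alias)
		return example_change_attr_one((unsigned long)alias);
	return 0;
}
#endif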
1712 | #endif | |
1713 | ||
1714 | /* | |
1715 | * Generic percpu area setup. | |
1716 | * | |
1717 | * The embedding helper is used because its behavior closely resembles | |
1718 | * the original non-dynamic generic percpu area setup. This is | |
1719 | * important because many archs have addressing restrictions and might | |
1720 | * fail if the percpu area is located far away from the previous | |
1721 | * location. As an added bonus, in non-NUMA cases, embedding is | |
1722 | * generally a good idea TLB-wise because the percpu area can piggyback | |
1723 | * on the physical linear memory mapping, which uses large page | |
1724 | * mappings on applicable archs. | |
1725 | */ | |
1726 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | |
1727 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | |
1728 | EXPORT_SYMBOL(__per_cpu_offset); | |
1729 | ||
1730 | void __init setup_per_cpu_areas(void) | |
1731 | { | |
1732 | size_t static_size = __per_cpu_end - __per_cpu_start; | |
1733 | ssize_t unit_size; | |
1734 | unsigned long delta; | |
1735 | unsigned int cpu; | |
1736 | ||
1737 | /* | |
1738 | * Always reserve area for module percpu variables. That's | |
1739 | * what the legacy allocator did. | |
1740 | */ | |
1741 | unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE, | |
1742 | PERCPU_DYNAMIC_RESERVE); | |
1743 | if (unit_size < 0) | |
1744 | panic("Failed to initialize percpu areas."); | |
1745 | ||
1746 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | |
1747 | for_each_possible_cpu(cpu) | |
1748 | __per_cpu_offset[cpu] = delta + cpu * unit_size; | |
1749 | } | |
1750 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
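/*
 * Editor's note (not part of the original source): with
 * __per_cpu_offset[] filled in as above, the generic per_cpu accessors
 * resolve a static percpu variable for a given cpu by adding that
 * cpu's offset to the variable's link address, conceptually:
 *
 *	per_cpu(var, cpu) == *(typeof(var) *)((char *)&var_in_image
 *					      + __per_cpu_offset[cpu])
 *
 * The exact macro spelling (SHIFT_PERCPU_PTR, RELOC_HIDE, etc.) varies
 * between kernel versions; this only sketches the addressing scheme.
 */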