/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks in the vmalloc area.  Each
 * chunk consists of a boot-time determined number of units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Unit grows as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated, again in the
 * vmalloc area.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit space.  That
 * is, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following.
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */
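/*
 * Worked example (illustrative, not from the original comment): a chunk
 * whose 4k unit has served one 512 byte and one 1024 byte allocation,
 * with another 512 byte area freed in between, could have
 *
 *	chunk->map[] = { -512, 512, -1024, 2048 }, chunk->map_used = 4
 *
 * Offsets are implicit - each entry's absolute value is an area size in
 * bytes, positive for free and negative for allocated.  free_size would
 * be 2560 and contig_hint at least 2048 for such a chunk.
 */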

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif

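/*
 * Illustrative note (an addition, not from the original source): with the
 * default macros above, translation is a fixed offset and round-trips
 * exactly, i.e. for any valid percpu address @addr,
 *
 *	__pcpu_ptr_to_addr(__addr_to_pcpu_ptr(addr)) == addr
 *
 * The macros only shift between the kernel image's static percpu section
 * and unit 0 of the first chunk at pcpu_base_addr.
 */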
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct vm_struct	**vms;		/* mapped vmalloc regions */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irq off but
 * irqsave/restore are still used in alloc path so that it can be used
 * from early init path - sched_init() specifically.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that it's
 * necessary to grab both locks to remove a chunk from circulation as
 * allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

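/*
 * Illustrative locking sketch (an assumption drawn from the rules above,
 * mirroring pcpu_alloc() below - not a new API):
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	... manipulate chunk slots / area maps ...
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 *	... GFP_KERNEL allocations, vmalloc mapping ...
 *	mutex_unlock(&pcpu_alloc_mutex);
 */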
static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

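/*
 * Worked example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5, a free
 * size of 1024 bytes maps to slot fls(1024) - 5 + 2 == 8, and any size
 * under 16 bytes clamps to slot 1.  pcpu_chunk_slot() below additionally
 * forces chunks whose free_size or contig_hint is under sizeof(int) into
 * slot 0 so they are never considered for allocation.
 */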
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

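/*
 * Example (illustrative, hypothetical numbers): with PAGE_SIZE == 4096,
 * a cpu whose pcpu_unit_offsets[] entry is 0x40000 and page_idx == 3,
 * pcpu_chunk_addr() returns chunk->base_addr + 0x40000 + 0x3000, i.e.
 * the kernel address of that cpu's copy of the chunk's fourth page.
 */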
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

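/*
 * Illustrative sketch (an addition for exposition, not part of the
 * allocator): count the unpopulated pages of @chunk with the iterator
 * above.  Assumes the caller holds pcpu_alloc_mutex so the populated
 * bitmap is stable.
 */
static inline int pcpu_count_unpop_pages_sketch(struct pcpu_chunk *chunk)
{
	int rs, re, nr = 0;

	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
		nr += re - rs;
	return nr;
}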
fbf59bc9 260/**
1880d93b
TH
261 * pcpu_mem_alloc - allocate memory
262 * @size: bytes to allocate
fbf59bc9 263 *
1880d93b
TH
264 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
265 * kzalloc() is used; otherwise, vmalloc() is used. The returned
266 * memory is always zeroed.
fbf59bc9 267 *
ccea34b5
TH
268 * CONTEXT:
269 * Does GFP_KERNEL allocation.
270 *
fbf59bc9 271 * RETURNS:
1880d93b 272 * Pointer to the allocated area on success, NULL on failure.
fbf59bc9 273 */
1880d93b 274static void *pcpu_mem_alloc(size_t size)
fbf59bc9 275{
1880d93b
TH
276 if (size <= PAGE_SIZE)
277 return kzalloc(size, GFP_KERNEL);
278 else {
279 void *ptr = vmalloc(size);
280 if (ptr)
281 memset(ptr, 0, size);
282 return ptr;
283 }
284}
fbf59bc9 285
1880d93b
TH
286/**
287 * pcpu_mem_free - free memory
288 * @ptr: memory to free
289 * @size: size of the area
290 *
291 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
292 */
293static void pcpu_mem_free(void *ptr, size_t size)
294{
fbf59bc9 295 if (size <= PAGE_SIZE)
1880d93b 296 kfree(ptr);
fbf59bc9 297 else
1880d93b 298 vfree(ptr);
fbf59bc9
TH
299}
300
301/**
302 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
303 * @chunk: chunk of interest
304 * @oslot: the previous slot it was on
305 *
306 * This function is called after an allocation or free changed @chunk.
307 * New slot according to the changed state is determined and @chunk is
edcb4639
TH
308 * moved to the slot. Note that the reserved chunk is never put on
309 * chunk slots.
ccea34b5
TH
310 *
311 * CONTEXT:
312 * pcpu_lock.
fbf59bc9
TH
313 */
314static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
315{
316 int nslot = pcpu_chunk_slot(chunk);
317
edcb4639 318 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
fbf59bc9
TH
319 if (oslot < nslot)
320 list_move(&chunk->list, &pcpu_slot[nslot]);
321 else
322 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
323 }
324}
325
fbf59bc9 326/**
e1b9aa3f
CL
327 * pcpu_chunk_addr_search - determine chunk containing specified address
328 * @addr: address for which the chunk needs to be determined.
ccea34b5 329 *
fbf59bc9
TH
330 * RETURNS:
331 * The address of the found chunk.
332 */
333static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
334{
bba174f5 335 void *first_start = pcpu_first_chunk->base_addr;
fbf59bc9 336
ae9e6bc9 337 /* is it in the first chunk? */
79ba6ac8 338 if (addr >= first_start && addr < first_start + pcpu_unit_size) {
ae9e6bc9
TH
339 /* is it in the reserved area? */
340 if (addr < first_start + pcpu_reserved_chunk_limit)
edcb4639 341 return pcpu_reserved_chunk;
ae9e6bc9 342 return pcpu_first_chunk;
edcb4639
TH
343 }
344
04a13c7c
TH
345 /*
346 * The address is relative to unit0 which might be unused and
347 * thus unmapped. Offset the address to the unit space of the
348 * current processor before looking it up in the vmalloc
349 * space. Note that any possible cpu id can be used here, so
350 * there's no need to worry about preemption or cpu hotplug.
351 */
5579fd7e 352 addr += pcpu_unit_offsets[raw_smp_processor_id()];
e1b9aa3f 353 return pcpu_get_page_chunk(vmalloc_to_page(addr));
fbf59bc9
TH
354}
355
/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

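/*
 * Worked example (illustrative): a chunk with map_alloc == 16 that has
 * filled 15 map entries needs room for at least 17; starting from
 * PCPU_DFL_MAP_ALLOC (16) and doubling gives new_alloc == 32, which
 * pcpu_extend_area_map() below then allocates and switches to.
 */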
384/**
385 * pcpu_extend_area_map - extend area map of a chunk
386 * @chunk: chunk of interest
387 * @new_alloc: new target allocation length of the area map
388 *
389 * Extend area map of @chunk to have @new_alloc entries.
390 *
391 * CONTEXT:
392 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
393 *
394 * RETURNS:
395 * 0 on success, -errno on failure.
396 */
397static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
398{
399 int *old = NULL, *new = NULL;
400 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
401 unsigned long flags;
402
403 new = pcpu_mem_alloc(new_size);
404 if (!new)
9f7dcf22 405 return -ENOMEM;
ccea34b5 406
833af842
TH
407 /* acquire pcpu_lock and switch to new area map */
408 spin_lock_irqsave(&pcpu_lock, flags);
409
410 if (new_alloc <= chunk->map_alloc)
411 goto out_unlock;
9f7dcf22 412
833af842
TH
413 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
414 memcpy(new, chunk->map, old_size);
9f7dcf22
TH
415
416 /*
417 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
418 * one of the first chunks and still using static map.
419 */
420 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
833af842 421 old = chunk->map;
9f7dcf22
TH
422
423 chunk->map_alloc = new_alloc;
424 chunk->map = new;
833af842
TH
425 new = NULL;
426
427out_unlock:
428 spin_unlock_irqrestore(&pcpu_lock, flags);
429
430 /*
431 * pcpu_mem_free() might end up calling vfree() which uses
432 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
433 */
434 pcpu_mem_free(old, old_size);
435 pcpu_mem_free(new, new_size);
436
9f7dcf22
TH
437 return 0;
438}
439
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

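/*
 * Worked example (illustrative): splitting a free 1024 byte block with
 * head == 64 and tail == 448, e.g. for a 512 byte allocation that needs
 * 64 bytes of alignment padding, turns
 *
 *	..., 1024, ...    into    ..., 64, 512, 448, ...
 *
 * in chunk->map; pcpu_alloc_area() below then negates the middle entry
 * when it hands the area out.
 */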
482/**
483 * pcpu_alloc_area - allocate area from a pcpu_chunk
484 * @chunk: chunk of interest
cae3aeb8 485 * @size: wanted size in bytes
fbf59bc9
TH
486 * @align: wanted align
487 *
488 * Try to allocate @size bytes area aligned at @align from @chunk.
489 * Note that this function only allocates the offset. It doesn't
490 * populate or map the area.
491 *
9f7dcf22
TH
492 * @chunk->map must have at least two free slots.
493 *
ccea34b5
TH
494 * CONTEXT:
495 * pcpu_lock.
496 *
fbf59bc9 497 * RETURNS:
9f7dcf22
TH
498 * Allocated offset in @chunk on success, -1 if no matching area is
499 * found.
fbf59bc9
TH
500 */
501static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
502{
503 int oslot = pcpu_chunk_slot(chunk);
504 int max_contig = 0;
505 int i, off;
506
fbf59bc9
TH
507 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
508 bool is_last = i + 1 == chunk->map_used;
509 int head, tail;
510
511 /* extra for alignment requirement */
512 head = ALIGN(off, align) - off;
513 BUG_ON(i == 0 && head != 0);
514
515 if (chunk->map[i] < 0)
516 continue;
517 if (chunk->map[i] < head + size) {
518 max_contig = max(chunk->map[i], max_contig);
519 continue;
520 }
521
522 /*
523 * If head is small or the previous block is free,
524 * merge'em. Note that 'small' is defined as smaller
525 * than sizeof(int), which is very small but isn't too
526 * uncommon for percpu allocations.
527 */
528 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
529 if (chunk->map[i - 1] > 0)
530 chunk->map[i - 1] += head;
531 else {
532 chunk->map[i - 1] -= head;
533 chunk->free_size -= head;
534 }
535 chunk->map[i] -= head;
536 off += head;
537 head = 0;
538 }
539
540 /* if tail is small, just keep it around */
541 tail = chunk->map[i] - head - size;
542 if (tail < sizeof(int))
543 tail = 0;
544
545 /* split if warranted */
546 if (head || tail) {
9f7dcf22 547 pcpu_split_block(chunk, i, head, tail);
fbf59bc9
TH
548 if (head) {
549 i++;
550 off += head;
551 max_contig = max(chunk->map[i - 1], max_contig);
552 }
553 if (tail)
554 max_contig = max(chunk->map[i + 1], max_contig);
555 }
556
557 /* update hint and mark allocated */
558 if (is_last)
559 chunk->contig_hint = max_contig; /* fully scanned */
560 else
561 chunk->contig_hint = max(chunk->contig_hint,
562 max_contig);
563
564 chunk->free_size -= chunk->map[i];
565 chunk->map[i] = -chunk->map[i];
566
567 pcpu_chunk_relocate(chunk, oslot);
568 return off;
569 }
570
571 chunk->contig_hint = max_contig; /* fully scanned */
572 pcpu_chunk_relocate(chunk, oslot);
573
9f7dcf22
TH
574 /* tell the upper layer that this chunk has no matching area */
575 return -1;
fbf59bc9
TH
576}
577
578/**
579 * pcpu_free_area - free area to a pcpu_chunk
580 * @chunk: chunk of interest
581 * @freeme: offset of area to free
582 *
583 * Free area starting from @freeme to @chunk. Note that this function
584 * only modifies the allocation map. It doesn't depopulate or unmap
585 * the area.
ccea34b5
TH
586 *
587 * CONTEXT:
588 * pcpu_lock.
fbf59bc9
TH
589 */
590static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
591{
592 int oslot = pcpu_chunk_slot(chunk);
593 int i, off;
594
595 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
596 if (off == freeme)
597 break;
598 BUG_ON(off != freeme);
599 BUG_ON(chunk->map[i] > 0);
600
601 chunk->map[i] = -chunk->map[i];
602 chunk->free_size += chunk->map[i];
603
604 /* merge with previous? */
605 if (i > 0 && chunk->map[i - 1] >= 0) {
606 chunk->map[i - 1] += chunk->map[i];
607 chunk->map_used--;
608 memmove(&chunk->map[i], &chunk->map[i + 1],
609 (chunk->map_used - i) * sizeof(chunk->map[0]));
610 i--;
611 }
612 /* merge with next? */
613 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
614 chunk->map[i] += chunk->map[i + 1];
615 chunk->map_used--;
616 memmove(&chunk->map[i + 1], &chunk->map[i + 2],
617 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
618 }
619
620 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
621 pcpu_chunk_relocate(chunk, oslot);
622}
623
624/**
ce3141a2 625 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
fbf59bc9 626 * @chunk: chunk of interest
ce3141a2
TH
627 * @bitmapp: output parameter for bitmap
628 * @may_alloc: may allocate the array
fbf59bc9 629 *
ce3141a2
TH
630 * Returns pointer to array of pointers to struct page and bitmap,
631 * both of which can be indexed with pcpu_page_idx(). The returned
632 * array is cleared to zero and *@bitmapp is copied from
633 * @chunk->populated. Note that there is only one array and bitmap
634 * and access exclusion is the caller's responsibility.
635 *
636 * CONTEXT:
637 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
638 * Otherwise, don't care.
639 *
640 * RETURNS:
641 * Pointer to temp pages array on success, NULL on failure.
fbf59bc9 642 */
ce3141a2
TH
643static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
644 unsigned long **bitmapp,
645 bool may_alloc)
fbf59bc9 646{
ce3141a2
TH
647 static struct page **pages;
648 static unsigned long *bitmap;
2f39e637 649 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
ce3141a2
TH
650 size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
651 sizeof(unsigned long);
652
653 if (!pages || !bitmap) {
654 if (may_alloc && !pages)
655 pages = pcpu_mem_alloc(pages_size);
656 if (may_alloc && !bitmap)
657 bitmap = pcpu_mem_alloc(bitmap_size);
658 if (!pages || !bitmap)
659 return NULL;
660 }
fbf59bc9 661
ce3141a2
TH
662 memset(pages, 0, pages_size);
663 bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);
8d408b4b 664
ce3141a2
TH
665 *bitmapp = bitmap;
666 return pages;
667}
fbf59bc9 668
ce3141a2
TH
669/**
670 * pcpu_free_pages - free pages which were allocated for @chunk
671 * @chunk: chunk pages were allocated for
672 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
673 * @populated: populated bitmap
674 * @page_start: page index of the first page to be freed
675 * @page_end: page index of the last page to be freed + 1
676 *
677 * Free pages [@page_start and @page_end) in @pages for all units.
678 * The pages were allocated for @chunk.
679 */
680static void pcpu_free_pages(struct pcpu_chunk *chunk,
681 struct page **pages, unsigned long *populated,
682 int page_start, int page_end)
683{
684 unsigned int cpu;
685 int i;
686
687 for_each_possible_cpu(cpu) {
688 for (i = page_start; i < page_end; i++) {
689 struct page *page = pages[pcpu_page_idx(cpu, i)];
690
691 if (page)
692 __free_page(page);
693 }
694 }
fbf59bc9
TH
695}
696
697/**
ce3141a2
TH
698 * pcpu_alloc_pages - allocates pages for @chunk
699 * @chunk: target chunk
700 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
701 * @populated: populated bitmap
702 * @page_start: page index of the first page to be allocated
703 * @page_end: page index of the last page to be allocated + 1
704 *
705 * Allocate pages [@page_start,@page_end) into @pages for all units.
706 * The allocation is for @chunk. Percpu core doesn't care about the
707 * content of @pages and will pass it verbatim to pcpu_map_pages().
fbf59bc9 708 */
ce3141a2
TH
709static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
710 struct page **pages, unsigned long *populated,
711 int page_start, int page_end)
fbf59bc9 712{
ce3141a2 713 const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
fbf59bc9
TH
714 unsigned int cpu;
715 int i;
716
ce3141a2
TH
717 for_each_possible_cpu(cpu) {
718 for (i = page_start; i < page_end; i++) {
719 struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
720
721 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
722 if (!*pagep) {
723 pcpu_free_pages(chunk, pages, populated,
724 page_start, page_end);
725 return -ENOMEM;
726 }
727 }
728 }
729 return 0;
730}
fbf59bc9 731
ce3141a2
TH
732/**
733 * pcpu_pre_unmap_flush - flush cache prior to unmapping
734 * @chunk: chunk the regions to be flushed belongs to
735 * @page_start: page index of the first page to be flushed
736 * @page_end: page index of the last page to be flushed + 1
737 *
738 * Pages in [@page_start,@page_end) of @chunk are about to be
739 * unmapped. Flush cache. As each flushing trial can be very
740 * expensive, issue flush on the whole region at once rather than
741 * doing it for each cpu. This could be an overkill but is more
742 * scalable.
743 */
744static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
745 int page_start, int page_end)
746{
2f39e637
TH
747 flush_cache_vunmap(
748 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
749 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
ce3141a2
TH
750}
751
752static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
753{
754 unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
755}
fbf59bc9 756
ce3141a2
TH
757/**
758 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
fbf59bc9 759 * @chunk: chunk of interest
ce3141a2
TH
760 * @pages: pages array which can be used to pass information to free
761 * @populated: populated bitmap
fbf59bc9
TH
762 * @page_start: page index of the first page to unmap
763 * @page_end: page index of the last page to unmap + 1
fbf59bc9
TH
764 *
765 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
ce3141a2
TH
766 * Corresponding elements in @pages were cleared by the caller and can
767 * be used to carry information to pcpu_free_pages() which will be
768 * called after all unmaps are finished. The caller should call
769 * proper pre/post flush functions.
fbf59bc9 770 */
ce3141a2
TH
771static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
772 struct page **pages, unsigned long *populated,
773 int page_start, int page_end)
fbf59bc9 774{
fbf59bc9 775 unsigned int cpu;
ce3141a2 776 int i;
fbf59bc9 777
ce3141a2
TH
778 for_each_possible_cpu(cpu) {
779 for (i = page_start; i < page_end; i++) {
780 struct page *page;
fbf59bc9 781
ce3141a2
TH
782 page = pcpu_chunk_page(chunk, cpu, i);
783 WARN_ON(!page);
784 pages[pcpu_page_idx(cpu, i)] = page;
fbf59bc9 785 }
ce3141a2
TH
786 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
787 page_end - page_start);
fbf59bc9
TH
788 }
789
ce3141a2
TH
790 for (i = page_start; i < page_end; i++)
791 __clear_bit(i, populated);
792}
793
794/**
795 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
796 * @chunk: pcpu_chunk the regions to be flushed belong to
797 * @page_start: page index of the first page to be flushed
798 * @page_end: page index of the last page to be flushed + 1
799 *
800 * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
801 * TLB for the regions. This can be skipped if the area is to be
802 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
803 *
804 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
805 * for the whole region.
806 */
807static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
808 int page_start, int page_end)
809{
2f39e637
TH
810 flush_tlb_kernel_range(
811 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
812 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
fbf59bc9
TH
813}
814
c8a51be4
TH
815static int __pcpu_map_pages(unsigned long addr, struct page **pages,
816 int nr_pages)
817{
818 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
819 PAGE_KERNEL, pages);
fbf59bc9
TH
820}
821
822/**
ce3141a2 823 * pcpu_map_pages - map pages into a pcpu_chunk
fbf59bc9 824 * @chunk: chunk of interest
ce3141a2
TH
825 * @pages: pages array containing pages to be mapped
826 * @populated: populated bitmap
fbf59bc9
TH
827 * @page_start: page index of the first page to map
828 * @page_end: page index of the last page to map + 1
829 *
ce3141a2
TH
830 * For each cpu, map pages [@page_start,@page_end) into @chunk. The
831 * caller is responsible for calling pcpu_post_map_flush() after all
832 * mappings are complete.
833 *
834 * This function is responsible for setting corresponding bits in
835 * @chunk->populated bitmap and whatever is necessary for reverse
836 * lookup (addr -> chunk).
fbf59bc9 837 */
ce3141a2
TH
838static int pcpu_map_pages(struct pcpu_chunk *chunk,
839 struct page **pages, unsigned long *populated,
840 int page_start, int page_end)
fbf59bc9 841{
ce3141a2
TH
842 unsigned int cpu, tcpu;
843 int i, err;
8d408b4b 844
fbf59bc9 845 for_each_possible_cpu(cpu) {
c8a51be4 846 err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
ce3141a2 847 &pages[pcpu_page_idx(cpu, page_start)],
c8a51be4 848 page_end - page_start);
fbf59bc9 849 if (err < 0)
ce3141a2 850 goto err;
c8a51be4
TH
851 }
852
ce3141a2
TH
853 /* mapping successful, link chunk and mark populated */
854 for (i = page_start; i < page_end; i++) {
855 for_each_possible_cpu(cpu)
856 pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
857 chunk);
858 __set_bit(i, populated);
fbf59bc9
TH
859 }
860
fbf59bc9 861 return 0;
ce3141a2
TH
862
863err:
864 for_each_possible_cpu(tcpu) {
865 if (tcpu == cpu)
866 break;
867 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
868 page_end - page_start);
869 }
870 return err;
871}
872
873/**
874 * pcpu_post_map_flush - flush cache after mapping
875 * @chunk: pcpu_chunk the regions to be flushed belong to
876 * @page_start: page index of the first page to be flushed
877 * @page_end: page index of the last page to be flushed + 1
878 *
879 * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
880 * cache.
881 *
882 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
883 * for the whole region.
884 */
885static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
886 int page_start, int page_end)
887{
2f39e637
TH
888 flush_cache_vmap(
889 pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
890 pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
c8a51be4
TH
891}
892
fbf59bc9
TH
893/**
894 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
895 * @chunk: chunk to depopulate
896 * @off: offset to the area to depopulate
cae3aeb8 897 * @size: size of the area to depopulate in bytes
fbf59bc9
TH
898 * @flush: whether to flush cache and tlb or not
899 *
900 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
901 * from @chunk. If @flush is true, vcache is flushed before unmapping
902 * and tlb after.
ccea34b5
TH
903 *
904 * CONTEXT:
905 * pcpu_alloc_mutex.
fbf59bc9 906 */
ce3141a2 907static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
fbf59bc9
TH
908{
909 int page_start = PFN_DOWN(off);
910 int page_end = PFN_UP(off + size);
ce3141a2
TH
911 struct page **pages;
912 unsigned long *populated;
913 int rs, re;
914
915 /* quick path, check whether it's empty already */
22b737f4
WC
916 rs = page_start;
917 pcpu_next_unpop(chunk, &rs, &re, page_end);
918 if (rs == page_start && re == page_end)
919 return;
fbf59bc9 920
ce3141a2
TH
921 /* immutable chunks can't be depopulated */
922 WARN_ON(chunk->immutable);
fbf59bc9 923
ce3141a2
TH
924 /*
925 * If control reaches here, there must have been at least one
926 * successful population attempt so the temp pages array must
927 * be available now.
928 */
929 pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
930 BUG_ON(!pages);
fbf59bc9 931
ce3141a2
TH
932 /* unmap and free */
933 pcpu_pre_unmap_flush(chunk, page_start, page_end);
fbf59bc9 934
ce3141a2
TH
935 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
936 pcpu_unmap_pages(chunk, pages, populated, rs, re);
fbf59bc9 937
ce3141a2
TH
938 /* no need to flush tlb, vmalloc will handle it lazily */
939
940 pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
941 pcpu_free_pages(chunk, pages, populated, rs, re);
fbf59bc9 942
ce3141a2
TH
943 /* commit new bitmap */
944 bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
fbf59bc9
TH
945}
946
947/**
948 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
949 * @chunk: chunk of interest
950 * @off: offset to the area to populate
cae3aeb8 951 * @size: size of the area to populate in bytes
fbf59bc9
TH
952 *
953 * For each cpu, populate and map pages [@page_start,@page_end) into
954 * @chunk. The area is cleared on return.
ccea34b5
TH
955 *
956 * CONTEXT:
957 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
fbf59bc9
TH
958 */
959static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
960{
fbf59bc9
TH
961 int page_start = PFN_DOWN(off);
962 int page_end = PFN_UP(off + size);
ce3141a2
TH
963 int free_end = page_start, unmap_end = page_start;
964 struct page **pages;
965 unsigned long *populated;
fbf59bc9 966 unsigned int cpu;
ce3141a2 967 int rs, re, rc;
fbf59bc9 968
ce3141a2 969 /* quick path, check whether all pages are already there */
22b737f4
WC
970 rs = page_start;
971 pcpu_next_pop(chunk, &rs, &re, page_end);
972 if (rs == page_start && re == page_end)
973 goto clear;
fbf59bc9 974
ce3141a2
TH
975 /* need to allocate and map pages, this chunk can't be immutable */
976 WARN_ON(chunk->immutable);
fbf59bc9 977
ce3141a2
TH
978 pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
979 if (!pages)
980 return -ENOMEM;
fbf59bc9 981
ce3141a2
TH
982 /* alloc and map */
983 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
984 rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
985 if (rc)
986 goto err_free;
987 free_end = re;
fbf59bc9
TH
988 }
989
ce3141a2
TH
990 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
991 rc = pcpu_map_pages(chunk, pages, populated, rs, re);
992 if (rc)
993 goto err_unmap;
994 unmap_end = re;
995 }
996 pcpu_post_map_flush(chunk, page_start, page_end);
fbf59bc9 997
ce3141a2
TH
998 /* commit new bitmap */
999 bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
1000clear:
fbf59bc9 1001 for_each_possible_cpu(cpu)
2f39e637 1002 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
fbf59bc9 1003 return 0;
ce3141a2
TH
1004
1005err_unmap:
1006 pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
1007 pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
1008 pcpu_unmap_pages(chunk, pages, populated, rs, re);
1009 pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
1010err_free:
1011 pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
1012 pcpu_free_pages(chunk, pages, populated, rs, re);
1013 return rc;
fbf59bc9
TH
1014}
1015
1016static void free_pcpu_chunk(struct pcpu_chunk *chunk)
1017{
1018 if (!chunk)
1019 return;
6563297c
TH
1020 if (chunk->vms)
1021 pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
1880d93b 1022 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
fbf59bc9
TH
1023 kfree(chunk);
1024}
1025
1026static struct pcpu_chunk *alloc_pcpu_chunk(void)
1027{
1028 struct pcpu_chunk *chunk;
1029
1030 chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
1031 if (!chunk)
1032 return NULL;
1033
1880d93b 1034 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
fbf59bc9
TH
1035 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
1036 chunk->map[chunk->map_used++] = pcpu_unit_size;
1037
6563297c
TH
1038 chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
1039 pcpu_nr_groups, pcpu_atom_size,
1040 GFP_KERNEL);
1041 if (!chunk->vms) {
fbf59bc9
TH
1042 free_pcpu_chunk(chunk);
1043 return NULL;
1044 }
1045
1046 INIT_LIST_HEAD(&chunk->list);
1047 chunk->free_size = pcpu_unit_size;
1048 chunk->contig_hint = pcpu_unit_size;
6563297c 1049 chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];
fbf59bc9
TH
1050
1051 return chunk;
1052}
1053
1054/**
edcb4639 1055 * pcpu_alloc - the percpu allocator
cae3aeb8 1056 * @size: size of area to allocate in bytes
fbf59bc9 1057 * @align: alignment of area (max PAGE_SIZE)
edcb4639 1058 * @reserved: allocate from the reserved chunk if available
fbf59bc9 1059 *
ccea34b5
TH
1060 * Allocate percpu area of @size bytes aligned at @align.
1061 *
1062 * CONTEXT:
1063 * Does GFP_KERNEL allocation.
fbf59bc9
TH
1064 *
1065 * RETURNS:
1066 * Percpu pointer to the allocated area on success, NULL on failure.
1067 */
edcb4639 1068static void *pcpu_alloc(size_t size, size_t align, bool reserved)
fbf59bc9 1069{
f2badb0c 1070 static int warn_limit = 10;
fbf59bc9 1071 struct pcpu_chunk *chunk;
f2badb0c 1072 const char *err;
833af842 1073 int slot, off, new_alloc;
403a91b1 1074 unsigned long flags;
fbf59bc9 1075
8d408b4b 1076 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
fbf59bc9
TH
1077 WARN(true, "illegal size (%zu) or align (%zu) for "
1078 "percpu allocation\n", size, align);
1079 return NULL;
1080 }
1081
ccea34b5 1082 mutex_lock(&pcpu_alloc_mutex);
403a91b1 1083 spin_lock_irqsave(&pcpu_lock, flags);
fbf59bc9 1084
edcb4639
TH
1085 /* serve reserved allocations from the reserved chunk if available */
1086 if (reserved && pcpu_reserved_chunk) {
1087 chunk = pcpu_reserved_chunk;
833af842
TH
1088
1089 if (size > chunk->contig_hint) {
1090 err = "alloc from reserved chunk failed";
ccea34b5 1091 goto fail_unlock;
f2badb0c 1092 }
833af842
TH
1093
1094 while ((new_alloc = pcpu_need_to_extend(chunk))) {
1095 spin_unlock_irqrestore(&pcpu_lock, flags);
1096 if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
1097 err = "failed to extend area map of reserved chunk";
1098 goto fail_unlock_mutex;
1099 }
1100 spin_lock_irqsave(&pcpu_lock, flags);
1101 }
1102
edcb4639
TH
1103 off = pcpu_alloc_area(chunk, size, align);
1104 if (off >= 0)
1105 goto area_found;
833af842 1106
f2badb0c 1107 err = "alloc from reserved chunk failed";
ccea34b5 1108 goto fail_unlock;
edcb4639
TH
1109 }
1110
ccea34b5 1111restart:
edcb4639 1112 /* search through normal chunks */
fbf59bc9
TH
1113 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1114 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1115 if (size > chunk->contig_hint)
1116 continue;
ccea34b5 1117
833af842
TH
1118 new_alloc = pcpu_need_to_extend(chunk);
1119 if (new_alloc) {
1120 spin_unlock_irqrestore(&pcpu_lock, flags);
1121 if (pcpu_extend_area_map(chunk,
1122 new_alloc) < 0) {
1123 err = "failed to extend area map";
1124 goto fail_unlock_mutex;
1125 }
1126 spin_lock_irqsave(&pcpu_lock, flags);
1127 /*
1128 * pcpu_lock has been dropped, need to
1129 * restart cpu_slot list walking.
1130 */
1131 goto restart;
ccea34b5
TH
1132 }
1133
fbf59bc9
TH
1134 off = pcpu_alloc_area(chunk, size, align);
1135 if (off >= 0)
1136 goto area_found;
fbf59bc9
TH
1137 }
1138 }
1139
1140 /* hmmm... no space left, create a new chunk */
403a91b1 1141 spin_unlock_irqrestore(&pcpu_lock, flags);
ccea34b5 1142
fbf59bc9 1143 chunk = alloc_pcpu_chunk();
f2badb0c
TH
1144 if (!chunk) {
1145 err = "failed to allocate new chunk";
ccea34b5 1146 goto fail_unlock_mutex;
f2badb0c 1147 }
ccea34b5 1148
403a91b1 1149 spin_lock_irqsave(&pcpu_lock, flags);
fbf59bc9 1150 pcpu_chunk_relocate(chunk, -1);
ccea34b5 1151 goto restart;
fbf59bc9
TH
1152
1153area_found:
403a91b1 1154 spin_unlock_irqrestore(&pcpu_lock, flags);
ccea34b5 1155
fbf59bc9
TH
1156 /* populate, map and clear the area */
1157 if (pcpu_populate_chunk(chunk, off, size)) {
403a91b1 1158 spin_lock_irqsave(&pcpu_lock, flags);
fbf59bc9 1159 pcpu_free_area(chunk, off);
f2badb0c 1160 err = "failed to populate";
ccea34b5 1161 goto fail_unlock;
fbf59bc9
TH
1162 }
1163
ccea34b5
TH
1164 mutex_unlock(&pcpu_alloc_mutex);
1165
bba174f5
TH
1166 /* return address relative to base address */
1167 return __addr_to_pcpu_ptr(chunk->base_addr + off);
ccea34b5
TH
1168
1169fail_unlock:
403a91b1 1170 spin_unlock_irqrestore(&pcpu_lock, flags);
ccea34b5
TH
1171fail_unlock_mutex:
1172 mutex_unlock(&pcpu_alloc_mutex);
f2badb0c
TH
1173 if (warn_limit) {
1174 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
1175 "%s\n", size, align, err);
1176 dump_stack();
1177 if (!--warn_limit)
1178 pr_info("PERCPU: limit reached, disable warning\n");
1179 }
ccea34b5 1180 return NULL;
fbf59bc9 1181}
edcb4639
TH
1182
1183/**
1184 * __alloc_percpu - allocate dynamic percpu area
1185 * @size: size of area to allocate in bytes
1186 * @align: alignment of area (max PAGE_SIZE)
1187 *
1188 * Allocate percpu area of @size bytes aligned at @align. Might
1189 * sleep. Might trigger writeouts.
1190 *
ccea34b5
TH
1191 * CONTEXT:
1192 * Does GFP_KERNEL allocation.
1193 *
edcb4639
TH
1194 * RETURNS:
1195 * Percpu pointer to the allocated area on success, NULL on failure.
1196 */
1197void *__alloc_percpu(size_t size, size_t align)
1198{
1199 return pcpu_alloc(size, align, false);
1200}
fbf59bc9
TH
1201EXPORT_SYMBOL_GPL(__alloc_percpu);
1202
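/*
 * Usage sketch (illustrative addition, not part of the API): a dynamic
 * percpu counter bumped on the local cpu.  The helper name is
 * hypothetical and error handling is minimal.
 */
static inline int pcpu_counter_demo_sketch(void)
{
	int *ctr = __alloc_percpu(sizeof(int), __alignof__(int));

	if (!ctr)
		return -ENOMEM;

	preempt_disable();
	(*per_cpu_ptr(ctr, smp_processor_id()))++;
	preempt_enable();

	free_percpu(ctr);
	return 0;
}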
edcb4639
TH
1203/**
1204 * __alloc_reserved_percpu - allocate reserved percpu area
1205 * @size: size of area to allocate in bytes
1206 * @align: alignment of area (max PAGE_SIZE)
1207 *
1208 * Allocate percpu area of @size bytes aligned at @align from reserved
1209 * percpu area if arch has set it up; otherwise, allocation is served
1210 * from the same dynamic area. Might sleep. Might trigger writeouts.
1211 *
ccea34b5
TH
1212 * CONTEXT:
1213 * Does GFP_KERNEL allocation.
1214 *
edcb4639
TH
1215 * RETURNS:
1216 * Percpu pointer to the allocated area on success, NULL on failure.
1217 */
1218void *__alloc_reserved_percpu(size_t size, size_t align)
1219{
1220 return pcpu_alloc(size, align, true);
1221}
1222
a56dbddf
TH
1223/**
1224 * pcpu_reclaim - reclaim fully free chunks, workqueue function
1225 * @work: unused
1226 *
1227 * Reclaim all fully free chunks except for the first one.
ccea34b5
TH
1228 *
1229 * CONTEXT:
1230 * workqueue context.
a56dbddf
TH
1231 */
1232static void pcpu_reclaim(struct work_struct *work)
fbf59bc9 1233{
a56dbddf
TH
1234 LIST_HEAD(todo);
1235 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
1236 struct pcpu_chunk *chunk, *next;
1237
ccea34b5
TH
1238 mutex_lock(&pcpu_alloc_mutex);
1239 spin_lock_irq(&pcpu_lock);
a56dbddf
TH
1240
1241 list_for_each_entry_safe(chunk, next, head, list) {
1242 WARN_ON(chunk->immutable);
1243
1244 /* spare the first one */
1245 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
1246 continue;
1247
a56dbddf
TH
1248 list_move(&chunk->list, &todo);
1249 }
1250
ccea34b5 1251 spin_unlock_irq(&pcpu_lock);
a56dbddf
TH
1252
1253 list_for_each_entry_safe(chunk, next, &todo, list) {
ce3141a2 1254 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
a56dbddf
TH
1255 free_pcpu_chunk(chunk);
1256 }
971f3918
TH
1257
1258 mutex_unlock(&pcpu_alloc_mutex);
fbf59bc9
TH
1259}
1260
1261/**
1262 * free_percpu - free percpu area
1263 * @ptr: pointer to area to free
1264 *
ccea34b5
TH
1265 * Free percpu area @ptr.
1266 *
1267 * CONTEXT:
1268 * Can be called from atomic context.
fbf59bc9
TH
1269 */
1270void free_percpu(void *ptr)
1271{
129182e5 1272 void *addr;
fbf59bc9 1273 struct pcpu_chunk *chunk;
ccea34b5 1274 unsigned long flags;
fbf59bc9
TH
1275 int off;
1276
1277 if (!ptr)
1278 return;
1279
129182e5
AM
1280 addr = __pcpu_ptr_to_addr(ptr);
1281
ccea34b5 1282 spin_lock_irqsave(&pcpu_lock, flags);
fbf59bc9
TH
1283
1284 chunk = pcpu_chunk_addr_search(addr);
bba174f5 1285 off = addr - chunk->base_addr;
fbf59bc9
TH
1286
1287 pcpu_free_area(chunk, off);
1288
	/* if there is more than one fully free chunk, wake up grim reaper */
fbf59bc9
TH
1290 if (chunk->free_size == pcpu_unit_size) {
1291 struct pcpu_chunk *pos;
1292
a56dbddf 1293 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
fbf59bc9 1294 if (pos != chunk) {
a56dbddf 1295 schedule_work(&pcpu_reclaim_work);
fbf59bc9
TH
1296 break;
1297 }
1298 }
1299
ccea34b5 1300 spin_unlock_irqrestore(&pcpu_lock, flags);
fbf59bc9
TH
1301}
1302EXPORT_SYMBOL_GPL(free_percpu);
1303
3b034b0d
VG
1304/**
1305 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1306 * @addr: the address to be converted to physical address
1307 *
1308 * Given @addr which is dereferenceable address obtained via one of
1309 * percpu access macros, this function translates it into its physical
1310 * address. The caller is responsible for ensuring @addr stays valid
1311 * until this function finishes.
1312 *
1313 * RETURNS:
1314 * The physical address for @addr.
1315 */
1316phys_addr_t per_cpu_ptr_to_phys(void *addr)
1317{
1318 if ((unsigned long)addr < VMALLOC_START ||
1319 (unsigned long)addr >= VMALLOC_END)
1320 return __pa(addr);
1321 else
1322 return page_to_phys(vmalloc_to_page(addr));
1323}
1324
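/*
 * Usage sketch (illustrative addition): translate the current cpu's
 * instance of a dynamically allocated percpu object to a physical
 * address, e.g. for handing to a device.  @pdata is a hypothetical
 * percpu pointer and must stay allocated while the result is in use.
 */
static inline phys_addr_t pcpu_my_phys_sketch(void *pdata)
{
	phys_addr_t phys;

	preempt_disable();
	phys = per_cpu_ptr_to_phys(per_cpu_ptr(pdata, smp_processor_id()));
	preempt_enable();

	return phys;
}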
033e48fb
TH
1325static inline size_t pcpu_calc_fc_sizes(size_t static_size,
1326 size_t reserved_size,
1327 ssize_t *dyn_sizep)
1328{
1329 size_t size_sum;
1330
1331 size_sum = PFN_ALIGN(static_size + reserved_size +
1332 (*dyn_sizep >= 0 ? *dyn_sizep : 0));
1333 if (*dyn_sizep != 0)
1334 *dyn_sizep = size_sum - static_size - reserved_size;
1335
1336 return size_sum;
1337}
1338
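/*
 * Worked example (illustrative, hypothetical sizes): with a 40k static
 * area, 8k reserved and *dyn_sizep == -1 (auto), size_sum becomes
 * PFN_ALIGN(48k) == 48k and *dyn_sizep is rewritten to 0; with
 * *dyn_sizep == 20k, size_sum becomes 68k and the dynamic size is left
 * as requested plus any page-alignment slack (none in this case).
 */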
fbf59bc9 1339/**
fd1e8a1f
TH
1340 * pcpu_alloc_alloc_info - allocate percpu allocation info
1341 * @nr_groups: the number of groups
1342 * @nr_units: the number of units
1343 *
1344 * Allocate ai which is large enough for @nr_groups groups containing
1345 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1346 * cpu_map array which is long enough for @nr_units and filled with
1347 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1348 * pointer of other groups.
1349 *
1350 * RETURNS:
1351 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1352 * failure.
1353 */
1354struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1355 int nr_units)
1356{
1357 struct pcpu_alloc_info *ai;
1358 size_t base_size, ai_size;
1359 void *ptr;
1360 int unit;
1361
1362 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1363 __alignof__(ai->groups[0].cpu_map[0]));
1364 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1365
1366 ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1367 if (!ptr)
1368 return NULL;
1369 ai = ptr;
1370 ptr += base_size;
1371
1372 ai->groups[0].cpu_map = ptr;
1373
1374 for (unit = 0; unit < nr_units; unit++)
1375 ai->groups[0].cpu_map[unit] = NR_CPUS;
1376
1377 ai->nr_groups = nr_groups;
1378 ai->__ai_size = PFN_ALIGN(ai_size);
1379
1380 return ai;
1381}
1382
1383/**
1384 * pcpu_free_alloc_info - free percpu allocation info
1385 * @ai: pcpu_alloc_info to free
1386 *
1387 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1388 */
1389void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1390{
1391 free_bootmem(__pa(ai), ai->__ai_size);
1392}
1393
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
fd1e8a1f
TH
1415struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1416 size_t reserved_size, ssize_t dyn_size,
1417 size_t atom_size,
1418 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
033e48fb
TH
1419{
1420 static int group_map[NR_CPUS] __initdata;
1421 static int group_cnt[NR_CPUS] __initdata;
1422 const size_t static_size = __per_cpu_end - __per_cpu_start;
fd1e8a1f 1423 int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
033e48fb
TH
1424 size_t size_sum, min_unit_size, alloc_size;
1425 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
fd1e8a1f 1426 int last_allocs, group, unit;
033e48fb 1427 unsigned int cpu, tcpu;
fd1e8a1f
TH
1428 struct pcpu_alloc_info *ai;
1429 unsigned int *cpu_map;
033e48fb 1430
fb59e72e
TH
1431 /* this function may be called multiple times */
1432 memset(group_map, 0, sizeof(group_map));
1433 memset(group_cnt, 0, sizeof(group_map));
1434
	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
fd1e8a1f 1441 size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
033e48fb
TH
1442 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1443
fd1e8a1f 1444 alloc_size = roundup(min_unit_size, atom_size);
033e48fb
TH
1445 upa = alloc_size / min_unit_size;
1446 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1447 upa--;
1448 max_upa = upa;
1449
1450 /* group cpus according to their proximity */
1451 for_each_possible_cpu(cpu) {
1452 group = 0;
1453 next_group:
1454 for_each_possible_cpu(tcpu) {
1455 if (cpu == tcpu)
1456 break;
fd1e8a1f 1457 if (group_map[tcpu] == group && cpu_distance_fn &&
033e48fb
TH
1458 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1459 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1460 group++;
fd1e8a1f 1461 nr_groups = max(nr_groups, group + 1);
033e48fb
TH
1462 goto next_group;
1463 }
1464 }
1465 group_map[cpu] = group;
1466 group_cnt[group]++;
1467 group_cnt_max = max(group_cnt_max, group_cnt[group]);
1468 }
1469
1470 /*
1471 * Expand unit size until address space usage goes over 75%
1472 * and then as much as possible without using more address
1473 * space.
1474 */
1475 last_allocs = INT_MAX;
1476 for (upa = max_upa; upa; upa--) {
1477 int allocs = 0, wasted = 0;
1478
1479 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1480 continue;
1481
fd1e8a1f 1482 for (group = 0; group < nr_groups; group++) {
033e48fb
TH
1483 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1484 allocs += this_allocs;
1485 wasted += this_allocs * upa - group_cnt[group];
1486 }
1487
1488 /*
1489 * Don't accept if wastage is over 25%. The
1490 * greater-than comparison ensures upa==1 always
1491 * passes the following check.
1492 */
1493 if (wasted > num_possible_cpus() / 3)
1494 continue;
1495
1496 /* and then don't consume more memory */
1497 if (allocs > last_allocs)
1498 break;
1499 last_allocs = allocs;
1500 best_upa = upa;
1501 }
fd1e8a1f
TH
1502 upa = best_upa;
1503
1504 /* allocate and fill alloc_info */
1505 for (group = 0; group < nr_groups; group++)
1506 nr_units += roundup(group_cnt[group], upa);
1507
1508 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1509 if (!ai)
1510 return ERR_PTR(-ENOMEM);
1511 cpu_map = ai->groups[0].cpu_map;
1512
1513 for (group = 0; group < nr_groups; group++) {
1514 ai->groups[group].cpu_map = cpu_map;
1515 cpu_map += roundup(group_cnt[group], upa);
1516 }
1517
1518 ai->static_size = static_size;
1519 ai->reserved_size = reserved_size;
1520 ai->dyn_size = dyn_size;
1521 ai->unit_size = alloc_size / upa;
1522 ai->atom_size = atom_size;
1523 ai->alloc_size = alloc_size;
1524
1525 for (group = 0, unit = 0; group_cnt[group]; group++) {
1526 struct pcpu_group_info *gi = &ai->groups[group];
1527
1528 /*
1529 * Initialize base_offset as if all groups are located
1530 * back-to-back. The caller should update this to
1531 * reflect actual allocation.
1532 */
1533 gi->base_offset = unit * ai->unit_size;
033e48fb 1534
033e48fb
TH
1535 for_each_possible_cpu(cpu)
1536 if (group_map[cpu] == group)
fd1e8a1f
TH
1537 gi->cpu_map[gi->nr_units++] = cpu;
1538 gi->nr_units = roundup(gi->nr_units, upa);
1539 unit += gi->nr_units;
033e48fb 1540 }
fd1e8a1f 1541 BUG_ON(unit != nr_units);
033e48fb 1542
fd1e8a1f 1543 return ai;
033e48fb
TH
1544}
1545
fd1e8a1f
TH
1546/**
1547 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1548 * @lvl: loglevel
1549 * @ai: allocation info to dump
1550 *
1551 * Print out information about @ai using loglevel @lvl.
1552 */
1553static void pcpu_dump_alloc_info(const char *lvl,
1554 const struct pcpu_alloc_info *ai)
033e48fb 1555{
fd1e8a1f 1556 int group_width = 1, cpu_width = 1, width;
033e48fb 1557 char empty_str[] = "--------";
fd1e8a1f
TH
1558 int alloc = 0, alloc_end = 0;
1559 int group, v;
1560 int upa, apl; /* units per alloc, allocs per line */
1561
1562 v = ai->nr_groups;
1563 while (v /= 10)
1564 group_width++;
033e48fb 1565
fd1e8a1f 1566 v = num_possible_cpus();
033e48fb 1567 while (v /= 10)
fd1e8a1f
TH
1568 cpu_width++;
1569 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
033e48fb 1570
fd1e8a1f
TH
1571 upa = ai->alloc_size / ai->unit_size;
1572 width = upa * (cpu_width + 1) + group_width + 3;
1573 apl = rounddown_pow_of_two(max(60 / width, 1));
033e48fb 1574
fd1e8a1f
TH
1575 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1576 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1577 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
033e48fb 1578
fd1e8a1f
TH
1579 for (group = 0; group < ai->nr_groups; group++) {
1580 const struct pcpu_group_info *gi = &ai->groups[group];
1581 int unit = 0, unit_end = 0;
1582
1583 BUG_ON(gi->nr_units % upa);
1584 for (alloc_end += gi->nr_units / upa;
1585 alloc < alloc_end; alloc++) {
1586 if (!(alloc % apl)) {
033e48fb 1587 printk("\n");
fd1e8a1f
TH
1588 printk("%spcpu-alloc: ", lvl);
1589 }
1590 printk("[%0*d] ", group_width, group);
1591
1592 for (unit_end += upa; unit < unit_end; unit++)
1593 if (gi->cpu_map[unit] != NR_CPUS)
1594 printk("%0*d ", cpu_width,
1595 gi->cpu_map[unit]);
1596 else
1597 printk("%s ", empty_str);
033e48fb 1598 }
033e48fb
TH
1599 }
1600 printk("\n");
1601}
033e48fb 1602
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
8d408b4b 1611 *
fd1e8a1f
TH
1612 * @ai contains all information necessary to initialize the first
1613 * chunk and prime the dynamic percpu allocator.
1614 *
1615 * @ai->static_size is the size of static percpu area.
1616 *
1617 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
edcb4639
TH
1618 * reserve after the static area in the first chunk. This part of
1619 * the first chunk is available only through reserved percpu
1620 * allocation. This is primarily used to serve module percpu
1621 * static areas on architectures where the addressing model has
1622 * limited offset range for symbol relocations to guarantee module
1623 * percpu symbols fall inside the relocatable range.
1624 *
fd1e8a1f
TH
1625 * @ai->dyn_size determines the number of bytes available for dynamic
1626 * allocation in the first chunk. The area between @ai->static_size +
1627 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
6074d5b0 1628 *
fd1e8a1f
TH
1629 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1630 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1631 * @ai->dyn_size.
8d408b4b 1632 *
fd1e8a1f
TH
1633 * @ai->atom_size is the allocation atom size and used as alignment
1634 * for vm areas.
8d408b4b 1635 *
fd1e8a1f
TH
1636 * @ai->alloc_size is the allocation size and always multiple of
1637 * @ai->atom_size. This is larger than @ai->atom_size if
1638 * @ai->unit_size is larger than @ai->atom_size.
1639 *
1640 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1641 * percpu areas. Units which should be colocated are put into the
1642 * same group. Dynamic VM areas will be allocated according to these
1643 * groupings. @ai->nr_groups must be at least one; in the simplest
1644 * case a single group contains all units.
8d408b4b 1645 *
38a6be52
TH
1646 * The caller should have mapped the first chunk at @base_addr and
1647 * copied static data to each unit.
fbf59bc9 1648 *
edcb4639
TH
1649 * If the first chunk ends up with both reserved and dynamic areas, it
1650 * is served by two chunks - one to serve the core static and reserved
1651 * areas and the other for the dynamic area. They share the same vm
1652 * and page map but use different area allocation maps to stay away
1653 * from each other. The latter chunk is circulated in the chunk slots
1654 * and is available for dynamic allocation like any other chunk.
1655 *
fbf59bc9 1656 * RETURNS:
fb435d52 1657 * 0 on success, -errno on failure.
fbf59bc9 1658 */
fb435d52
TH
1659int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1660 void *base_addr)
fbf59bc9 1661{
635b75fc 1662 static char cpus_buf[4096] __initdata;
edcb4639 1663 static int smap[2], dmap[2];
fd1e8a1f
TH
1664 size_t dyn_size = ai->dyn_size;
1665 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
edcb4639 1666 struct pcpu_chunk *schunk, *dchunk = NULL;
6563297c
TH
1667 unsigned long *group_offsets;
1668 size_t *group_sizes;
fb435d52 1669 unsigned long *unit_off;
fbf59bc9 1670 unsigned int cpu;
fd1e8a1f
TH
1671 int *unit_map;
1672 int group, unit, i;
fbf59bc9 1673
635b75fc
TH
1674 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1675
1676#define PCPU_SETUP_BUG_ON(cond) do { \
1677 if (unlikely(cond)) { \
1678 pr_emerg("PERCPU: failed to initialize, %s\n", #cond); \
1679 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1680 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1681 BUG(); \
1682 } \
1683} while (0)
1684
2f39e637 1685 /* sanity checks */
edcb4639
TH
1686 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1687 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
635b75fc
TH
1688 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1689 PCPU_SETUP_BUG_ON(!ai->static_size);
1690 PCPU_SETUP_BUG_ON(!base_addr);
1691 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1692 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1693 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
8d408b4b 1694
6563297c
TH
1695 /* process group information and build config tables accordingly */
1696 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1697 group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
fd1e8a1f 1698 unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
fb435d52 1699 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
2f39e637 1700
fd1e8a1f 1701 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
ffe0d5a5 1702 unit_map[cpu] = UINT_MAX;
fd1e8a1f 1703 pcpu_first_unit_cpu = NR_CPUS;
2f39e637 1704
fd1e8a1f
TH
1705 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1706 const struct pcpu_group_info *gi = &ai->groups[group];
2f39e637 1707
6563297c
TH
1708 group_offsets[group] = gi->base_offset;
1709 group_sizes[group] = gi->nr_units * ai->unit_size;
1710
fd1e8a1f
TH
1711 for (i = 0; i < gi->nr_units; i++) {
1712 cpu = gi->cpu_map[i];
1713 if (cpu == NR_CPUS)
1714 continue;
8d408b4b 1715
635b75fc
TH
1716 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1717 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1718 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
fbf59bc9 1719
fd1e8a1f 1720 unit_map[cpu] = unit + i;
fb435d52
TH
1721 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1722
fd1e8a1f
TH
1723 if (pcpu_first_unit_cpu == NR_CPUS)
1724 pcpu_first_unit_cpu = cpu;
1725 }
2f39e637 1726 }
fd1e8a1f
TH
1727 pcpu_last_unit_cpu = cpu;
1728 pcpu_nr_units = unit;
1729
1730 for_each_possible_cpu(cpu)
635b75fc
TH
1731 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1732
1733 /* we're done parsing the input, undefine BUG macro and dump config */
1734#undef PCPU_SETUP_BUG_ON
1735 pcpu_dump_alloc_info(KERN_INFO, ai);
fd1e8a1f 1736
6563297c
TH
1737 pcpu_nr_groups = ai->nr_groups;
1738 pcpu_group_offsets = group_offsets;
1739 pcpu_group_sizes = group_sizes;
fd1e8a1f 1740 pcpu_unit_map = unit_map;
fb435d52 1741 pcpu_unit_offsets = unit_off;
2f39e637
TH
1742
1743 /* determine basic parameters */
fd1e8a1f 1744 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
d9b55eeb 1745 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
6563297c 1746 pcpu_atom_size = ai->atom_size;
ce3141a2
TH
1747 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1748 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
cafe8816 1749
d9b55eeb
TH
1750 /*
1751 * Allocate chunk slots. The additional last slot is for
1752 * empty chunks.
1753 */
1754 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
fbf59bc9
TH
1755 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1756 for (i = 0; i < pcpu_nr_slots; i++)
1757 INIT_LIST_HEAD(&pcpu_slot[i]);
1758
edcb4639
TH
1759 /*
1760 * Initialize static chunk. If reserved_size is zero, the
1761 * static chunk covers static area + dynamic allocation area
1762 * in the first chunk. If reserved_size is not zero, it
1763 * covers static area + reserved area (mostly used for module
1764 * static percpu allocation).
1765 */
2441d15c
TH
1766 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1767 INIT_LIST_HEAD(&schunk->list);
bba174f5 1768 schunk->base_addr = base_addr;
61ace7fa
TH
1769 schunk->map = smap;
1770 schunk->map_alloc = ARRAY_SIZE(smap);
38a6be52 1771 schunk->immutable = true;
ce3141a2 1772 bitmap_fill(schunk->populated, pcpu_unit_pages);
edcb4639 1773
fd1e8a1f
TH
1774 if (ai->reserved_size) {
1775 schunk->free_size = ai->reserved_size;
ae9e6bc9 1776 pcpu_reserved_chunk = schunk;
fd1e8a1f 1777 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
edcb4639
TH
1778 } else {
1779 schunk->free_size = dyn_size;
1780 dyn_size = 0; /* dynamic area covered */
1781 }
2441d15c 1782 schunk->contig_hint = schunk->free_size;
fbf59bc9 1783
fd1e8a1f 1784 schunk->map[schunk->map_used++] = -ai->static_size;
61ace7fa
TH
1785 if (schunk->free_size)
1786 schunk->map[schunk->map_used++] = schunk->free_size;
1787
edcb4639
TH
1788 /* init dynamic chunk if necessary */
1789 if (dyn_size) {
ce3141a2 1790 dchunk = alloc_bootmem(pcpu_chunk_struct_size);
edcb4639 1791 INIT_LIST_HEAD(&dchunk->list);
bba174f5 1792 dchunk->base_addr = base_addr;
edcb4639
TH
1793 dchunk->map = dmap;
1794 dchunk->map_alloc = ARRAY_SIZE(dmap);
38a6be52 1795 dchunk->immutable = true;
ce3141a2 1796 bitmap_fill(dchunk->populated, pcpu_unit_pages);
edcb4639
TH
1797
1798 dchunk->contig_hint = dchunk->free_size = dyn_size;
1799 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1800 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1801 }
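	/*
	 * Illustrative example (hypothetical sizes): with 20k static,
	 * 8k reserved and 16k dynamic, the two maps built above are
	 *
	 *	smap = { -20480, 8192 }		(static + reserved chunk)
	 *	dmap = { -28672, 16384 }	(dynamic chunk)
	 *
	 * following the chunk->map convention described at the top of
	 * this file.
	 */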
1802
2441d15c 1803 /* link the first chunk in */
ae9e6bc9
TH
1804 pcpu_first_chunk = dchunk ?: schunk;
1805 pcpu_chunk_relocate(pcpu_first_chunk, -1);
fbf59bc9
TH
1806
1807 /* we're done */
bba174f5 1808 pcpu_base_addr = base_addr;
fb435d52 1809 return 0;
fbf59bc9 1810}
66c3a757 1811
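/*
 * Illustrative sketch (not part of the kernel source): how an arch
 * which has already allocated, mapped and copied its first chunk at
 * @base_addr might describe it to the allocator.  All sizes below are
 * assumptions for the example; real callers derive them from the
 * architecture, usually via pcpu_build_alloc_info() or the embed/page
 * helpers further down.
 */
static int __init example_describe_first_chunk(void *base_addr)
{
	struct pcpu_alloc_info *ai;
	unsigned int cpu;
	size_t size_sum;
	int rc, i = 0;

	/* a single group containing every possible cpu */
	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		return -ENOMEM;

	ai->static_size = __per_cpu_end - __per_cpu_start; /* asm/sections.h */
	ai->reserved_size = PERCPU_MODULE_RESERVE;
	ai->dyn_size = PERCPU_DYNAMIC_RESERVE;
	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	ai->unit_size = max_t(size_t, PFN_ALIGN(size_sum),
			      PCPU_MIN_UNIT_SIZE);
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = ai->unit_size;

	ai->groups[0].nr_units = num_possible_cpus();
	for_each_possible_cpu(cpu)
		ai->groups[0].cpu_map[i++] = cpu;

	rc = pcpu_setup_first_chunk(ai, base_addr);
	pcpu_free_alloc_info(ai);
	return rc;
}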
f58dc01b
TH
1812const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1813 [PCPU_FC_AUTO] = "auto",
1814 [PCPU_FC_EMBED] = "embed",
1815 [PCPU_FC_PAGE] = "page",
f58dc01b 1816};
66c3a757 1817
f58dc01b 1818enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
66c3a757 1819
f58dc01b
TH
1820static int __init percpu_alloc_setup(char *str)
1821{
1822 if (0)
1823 /* nada */;
1824#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1825 else if (!strcmp(str, "embed"))
1826 pcpu_chosen_fc = PCPU_FC_EMBED;
1827#endif
1828#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1829 else if (!strcmp(str, "page"))
1830 pcpu_chosen_fc = PCPU_FC_PAGE;
f58dc01b
TH
1831#endif
1832 else
1833 pr_warning("PERCPU: unknown allocator %s specified\n", str);
66c3a757 1834
f58dc01b 1835 return 0;
66c3a757 1836}
f58dc01b 1837early_param("percpu_alloc", percpu_alloc_setup);
66c3a757 1838
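/*
 * Example (kernel command line): booting with "percpu_alloc=page"
 * selects the page-based first chunk allocator where it is built in,
 * "percpu_alloc=embed" selects the embedding allocator, and any other
 * value just warns and leaves the default (auto) in place.
 */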
08fc4580
TH
1839#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1840 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
66c3a757
TH
1841/**
1842 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
66c3a757
TH
1843 * @reserved_size: the size of reserved percpu area in bytes
1844 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
c8826dd5
TH
1845 * @atom_size: allocation atom size
1846 * @cpu_distance_fn: callback to determine distance between cpus, optional
1847 * @alloc_fn: function to allocate percpu page
1848 * @free_fn: function to free percpu page
66c3a757
TH
1849 *
1850 * This is a helper to ease setting up the embedded first percpu chunk and
1851 * can be called where pcpu_setup_first_chunk() is expected.
1852 *
1853 * If this function is used to setup the first chunk, it is allocated
c8826dd5
TH
1854 * by calling @alloc_fn and used as-is without being mapped into
1855 * vmalloc area. Allocations are always whole multiples of @atom_size
1856 * aligned to @atom_size.
1857 *
1858 * This enables the first chunk to piggy back on the linear physical
1859 * mapping which often uses larger page size. Please note that this
1860 * can result in very sparse cpu->unit mapping on NUMA machines thus
1861 * requiring large vmalloc address space. Don't use this allocator if
1862 * vmalloc space is not orders of magnitude larger than distances
1863 * between node memory addresses (ie. 32bit NUMA machines).
66c3a757
TH
1864 *
1865 * When @dyn_size is positive, dynamic area might be larger than
788e5abc
TH
1866 * specified to fill page alignment. When @dyn_size is auto,
1867 * @dyn_size is just big enough to fill page alignment after static
1868 * and reserved areas.
66c3a757
TH
1869 *
1870 * If the needed size is smaller than the minimum or specified unit
c8826dd5 1871 * size, the leftover is returned using @free_fn.
66c3a757
TH
1872 *
1873 * RETURNS:
fb435d52 1874 * 0 on success, -errno on failure.
66c3a757 1875 */
c8826dd5
TH
1876int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1877 size_t atom_size,
1878 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1879 pcpu_fc_alloc_fn_t alloc_fn,
1880 pcpu_fc_free_fn_t free_fn)
66c3a757 1881{
c8826dd5
TH
1882 void *base = (void *)ULONG_MAX;
1883 void **areas = NULL;
fd1e8a1f 1884 struct pcpu_alloc_info *ai;
6ea529a2 1885 size_t size_sum, areas_size, max_distance;
c8826dd5 1886 int group, i, rc;
66c3a757 1887
c8826dd5
TH
1888 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1889 cpu_distance_fn);
fd1e8a1f
TH
1890 if (IS_ERR(ai))
1891 return PTR_ERR(ai);
66c3a757 1892
fd1e8a1f 1893 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
c8826dd5 1894 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
fa8a7094 1895
c8826dd5
TH
1896 areas = alloc_bootmem_nopanic(areas_size);
1897 if (!areas) {
fb435d52 1898 rc = -ENOMEM;
c8826dd5 1899 goto out_free;
fa8a7094 1900 }
66c3a757 1901
c8826dd5
TH
1902 /* allocate, copy and determine base address */
1903 for (group = 0; group < ai->nr_groups; group++) {
1904 struct pcpu_group_info *gi = &ai->groups[group];
1905 unsigned int cpu = NR_CPUS;
1906 void *ptr;
1907
1908 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1909 cpu = gi->cpu_map[i];
1910 BUG_ON(cpu == NR_CPUS);
1911
1912 /* allocate space for the whole group */
1913 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1914 if (!ptr) {
1915 rc = -ENOMEM;
1916 goto out_free_areas;
1917 }
1918 areas[group] = ptr;
fd1e8a1f 1919
c8826dd5
TH
1920 base = min(ptr, base);
1921
1922 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1923 if (gi->cpu_map[i] == NR_CPUS) {
1924 /* unused unit, free whole */
1925 free_fn(ptr, ai->unit_size);
1926 continue;
1927 }
1928 /* copy and return the unused part */
1929 memcpy(ptr, __per_cpu_load, ai->static_size);
1930 free_fn(ptr + size_sum, ai->unit_size - size_sum);
1931 }
fa8a7094 1932 }
66c3a757 1933
c8826dd5 1934 /* base address is now known, determine group base offsets */
6ea529a2
TH
1935 max_distance = 0;
1936 for (group = 0; group < ai->nr_groups; group++) {
c8826dd5 1937 ai->groups[group].base_offset = areas[group] - base;
1a0c3298
TH
1938 max_distance = max_t(size_t, max_distance,
1939 ai->groups[group].base_offset);
6ea529a2
TH
1940 }
1941 max_distance += ai->unit_size;
1942
1943 /* warn if maximum distance is further than 75% of vmalloc space */
1944 if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1a0c3298 1945 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
6ea529a2
TH
1946 "space 0x%lx\n",
1947 max_distance, VMALLOC_END - VMALLOC_START);
1948#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1949 /* and fail if we have fallback */
1950 rc = -EINVAL;
1951 goto out_free;
1952#endif
1953 }
c8826dd5 1954
004018e2 1955 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
fd1e8a1f
TH
1956 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1957 ai->dyn_size, ai->unit_size);
d4b95f80 1958
fb435d52 1959 rc = pcpu_setup_first_chunk(ai, base);
c8826dd5
TH
1960 goto out_free;
1961
1962out_free_areas:
1963 for (group = 0; group < ai->nr_groups; group++)
1964 free_fn(areas[group],
1965 ai->groups[group].nr_units * ai->unit_size);
1966out_free:
fd1e8a1f 1967 pcpu_free_alloc_info(ai);
c8826dd5
TH
1968 if (areas)
1969 free_bootmem(__pa(areas), areas_size);
fb435d52 1970 return rc;
d4b95f80 1971}
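/*
 * Worked example of the max_distance check above (hypothetical
 * numbers): if two NUMA groups end up allocated 1GB apart in the
 * linear mapping, max_distance is roughly 1GB + unit_size and the
 * vmalloc area must span at least that much.  A 32bit machine with a
 * ~128MB vmalloc range trips the 75% warning; when the page allocator
 * is built in, this function then fails with -EINVAL so the caller
 * can fall back to it.
 */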
08fc4580
TH
1972#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1973 !CONFIG_HAVE_SETUP_PER_CPU_AREA */
d4b95f80 1974
08fc4580 1975#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
d4b95f80 1976/**
00ae4064 1977 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
d4b95f80
TH
1978 * @reserved_size: the size of reserved percpu area in bytes
1979 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1980 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1981 * @populate_pte_fn: function to populate pte
1982 *
00ae4064
TH
1983 * This is a helper to ease setting up a page-remapped first percpu
1984 * chunk and can be called where pcpu_setup_first_chunk() is expected.
d4b95f80
TH
1985 *
1986 * This is the basic allocator. The static percpu area is allocated
1987 * page-by-page and mapped into the vmalloc area.
1988 *
1989 * RETURNS:
fb435d52 1990 * 0 on success, -errno on failure.
d4b95f80 1991 */
fb435d52
TH
1992int __init pcpu_page_first_chunk(size_t reserved_size,
1993 pcpu_fc_alloc_fn_t alloc_fn,
1994 pcpu_fc_free_fn_t free_fn,
1995 pcpu_fc_populate_pte_fn_t populate_pte_fn)
d4b95f80 1996{
8f05a6a6 1997 static struct vm_struct vm;
fd1e8a1f 1998 struct pcpu_alloc_info *ai;
00ae4064 1999 char psize_str[16];
ce3141a2 2000 int unit_pages;
d4b95f80 2001 size_t pages_size;
ce3141a2 2002 struct page **pages;
fb435d52 2003 int unit, i, j, rc;
d4b95f80 2004
00ae4064
TH
2005 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2006
fd1e8a1f
TH
2007 ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
2008 if (IS_ERR(ai))
2009 return PTR_ERR(ai);
2010 BUG_ON(ai->nr_groups != 1);
2011 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
2012
2013 unit_pages = ai->unit_size >> PAGE_SHIFT;
d4b95f80
TH
2014
2015 /* unaligned allocations can't be freed, round up to page size */
fd1e8a1f
TH
2016 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2017 sizeof(pages[0]));
ce3141a2 2018 pages = alloc_bootmem(pages_size);
d4b95f80 2019
8f05a6a6 2020 /* allocate pages */
d4b95f80 2021 j = 0;
fd1e8a1f 2022 for (unit = 0; unit < num_possible_cpus(); unit++)
ce3141a2 2023 for (i = 0; i < unit_pages; i++) {
fd1e8a1f 2024 unsigned int cpu = ai->groups[0].cpu_map[unit];
d4b95f80
TH
2025 void *ptr;
2026
3cbc8565 2027 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
d4b95f80 2028 if (!ptr) {
00ae4064
TH
2029 pr_warning("PERCPU: failed to allocate %s page "
2030 "for cpu%u\n", psize_str, cpu);
d4b95f80
TH
2031 goto enomem;
2032 }
ce3141a2 2033 pages[j++] = virt_to_page(ptr);
d4b95f80
TH
2034 }
2035
8f05a6a6
TH
2036 /* allocate vm area, map the pages and copy static data */
2037 vm.flags = VM_ALLOC;
fd1e8a1f 2038 vm.size = num_possible_cpus() * ai->unit_size;
8f05a6a6
TH
2039 vm_area_register_early(&vm, PAGE_SIZE);
2040
fd1e8a1f 2041 for (unit = 0; unit < num_possible_cpus(); unit++) {
1d9d3257 2042 unsigned long unit_addr =
fd1e8a1f 2043 (unsigned long)vm.addr + unit * ai->unit_size;
8f05a6a6 2044
ce3141a2 2045 for (i = 0; i < unit_pages; i++)
8f05a6a6
TH
2046 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2047
2048 /* pte already populated, the following shouldn't fail */
fb435d52
TH
2049 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2050 unit_pages);
2051 if (rc < 0)
2052 panic("failed to map percpu area, err=%d\n", rc);
66c3a757 2053
8f05a6a6
TH
2054 /*
2055 * FIXME: Archs with virtual cache should flush local
2056 * cache for the linear mapping here - something
2057 * equivalent to flush_cache_vmap() on the local cpu.
2058 * flush_cache_vmap() can't be used as most supporting
2059 * data structures are not set up yet.
2060 */
2061
2062 /* copy static data */
fd1e8a1f 2063 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
66c3a757
TH
2064 }
2065
2066 /* we're ready, commit */
1d9d3257 2067 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
fd1e8a1f
TH
2068 unit_pages, psize_str, vm.addr, ai->static_size,
2069 ai->reserved_size, ai->dyn_size);
d4b95f80 2070
fb435d52 2071 rc = pcpu_setup_first_chunk(ai, vm.addr);
d4b95f80
TH
2072 goto out_free_ar;
2073
2074enomem:
2075 while (--j >= 0)
ce3141a2 2076 free_fn(page_address(pages[j]), PAGE_SIZE);
fb435d52 2077 rc = -ENOMEM;
d4b95f80 2078out_free_ar:
ce3141a2 2079 free_bootmem(__pa(pages), pages_size);
fd1e8a1f 2080 pcpu_free_alloc_info(ai);
fb435d52 2081 return rc;
d4b95f80 2082}
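/*
 * Illustrative sketch (not part of the kernel source): the pte
 * populator an arch passes to pcpu_page_first_chunk().  It only has
 * to make sure the intermediate page tables for @addr exist; the ptes
 * themselves are installed by __pcpu_map_pages() above.  This loosely
 * follows sparc64's version, needs asm/pgtable.h, and assumes the
 * upper levels are folded or already present.  The alloc/free
 * callbacks can be plain bootmem wrappers like pcpu_dfl_fc_alloc() /
 * pcpu_dfl_fc_free() below.
 */
static void __init example_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd;

	if (pud_none(*pud)) {
		pmd_t *new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);

		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);

		pmd_populate_kernel(&init_mm, pmd, new);
	}
}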
08fc4580 2083#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
d4b95f80 2084
e74e3962
TH
2085/*
2086 * Generic percpu area setup.
2087 *
2088 * The embedding helper is used because its behavior closely resembles
2089 * the original non-dynamic generic percpu area setup. This is
2090 * important because many archs have addressing restrictions and might
2091 * fail if the percpu area is located far away from the previous
2092 * location. As an added bonus, in non-NUMA cases, embedding is
2093 * generally a good idea TLB-wise because percpu area can piggy back
2094 * on the physical linear memory mapping which uses large page
2095 * mappings on applicable archs.
2096 */
2097#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2098unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2099EXPORT_SYMBOL(__per_cpu_offset);
2100
c8826dd5
TH
2101static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2102 size_t align)
2103{
2104 return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
2105}
66c3a757 2106
c8826dd5
TH
2107static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2108{
2109 free_bootmem(__pa(ptr), size);
2110}
2111
e74e3962
TH
2112void __init setup_per_cpu_areas(void)
2113{
e74e3962
TH
2114 unsigned long delta;
2115 unsigned int cpu;
fb435d52 2116 int rc;
e74e3962
TH
2117
2118 /*
2119 * Always reserve area for module percpu variables. That's
2120 * what the legacy allocator did.
2121 */
fb435d52 2122 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
c8826dd5
TH
2123 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2124 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
fb435d52 2125 if (rc < 0)
e74e3962
TH
2126 panic("Failed to initialize percpu areas.");
2127
2128 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2129 for_each_possible_cpu(cpu)
fb435d52 2130 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
66c3a757 2131}
e74e3962 2132#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
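/*
 * Illustrative note (generic case): once __per_cpu_offset[] is filled
 * in above, reaching a static percpu variable is just its link-time
 * address plus the cpu's offset, which is what per_cpu() boils down
 * to on archs using the generic per_cpu_offset().  The names below
 * are hypothetical:
 *
 *	DEFINE_PER_CPU(int, example_counter);
 *
 *	int read_example(unsigned int cpu)
 *	{
 *		return per_cpu(example_counter, cpu);
 *	}
 */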