/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated, i.e. in the
 * vmalloc area:
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  I.e.,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers pcpu_unit_size apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following.
 *
 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

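/*
 * Illustrative only, not part of the allocator: a minimal sketch of how
 * a caller is expected to use the dynamic interface defined below,
 * assuming the usual per_cpu_ptr() accessor from <linux/percpu.h> is
 * available on the architecture.
 *
 *	int *ctr = __alloc_percpu(sizeof(int), __alignof__(int));
 *	unsigned int cpu;
 *
 *	if (!ctr)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		*per_cpu_ptr(ctr, cpu) = 0;
 *	free_percpu(ctr);
 */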
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif

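/*
 * Illustrative only: with the default translation above, a unit-0
 * address maps to the percpu pointer
 *
 *	ptr = addr - pcpu_base_addr + __per_cpu_start
 *
 * On configurations using the generic setup_per_cpu_areas() at the end
 * of this file, __per_cpu_offset[cpu] is
 * (pcpu_base_addr - __per_cpu_start) + cpu * pcpu_unit_size, so
 * per_cpu_ptr(ptr, cpu) lands on cpu's copy of the same offset inside
 * its own unit.
 */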
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

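/*
 * Illustrative only: a chunk whose allocation map is { 128, -256, 640 }
 * describes a 128 byte free area, a 256 byte allocated area and a
 * 640 byte free area in that order, so free_size is 768 and
 * contig_hint is at least 640 (the largest contiguous free area).
 */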
/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks and chunk->page arrays.
 * The latter is a spinlock and protects the index data structures -
 * chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * The free path accesses and alters only the index data structures, so
 * it can be safely called from atomic context.  When memory needs to
 * be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from circulation
 * as the allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

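/*
 * Illustrative only: with PCPU_SLOT_BASE_SHIFT == 5,
 * __pcpu_size_to_slot() computes max(fls(size) - 5 + 2, 1), e.g.
 * size 12 -> fls = 4 -> slot 1, size 64 -> fls = 7 -> slot 4,
 * size 4096 -> fls = 13 -> slot 10.  A completely free chunk
 * (free_size == pcpu_unit_size) always goes to the last slot,
 * pcpu_nr_slots - 1.
 */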
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is not larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

fbf59bc9 281/**
e1b9aa3f
CL
282 * pcpu_chunk_addr_search - determine chunk containing specified address
283 * @addr: address for which the chunk needs to be determined.
ccea34b5 284 *
fbf59bc9
TH
285 * RETURNS:
286 * The address of the found chunk.
287 */
288static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
289{
ae9e6bc9 290 void *first_start = pcpu_first_chunk->vm->addr;
fbf59bc9 291
ae9e6bc9 292 /* is it in the first chunk? */
79ba6ac8 293 if (addr >= first_start && addr < first_start + pcpu_unit_size) {
ae9e6bc9
TH
294 /* is it in the reserved area? */
295 if (addr < first_start + pcpu_reserved_chunk_limit)
edcb4639 296 return pcpu_reserved_chunk;
ae9e6bc9 297 return pcpu_first_chunk;
edcb4639
TH
298 }
299
e1b9aa3f 300 return pcpu_get_page_chunk(vmalloc_to_page(addr));
fbf59bc9
TH
301}
302
/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend the area map of @chunk so that it can accommodate an
 * allocation.  A single allocation can split an area into three areas,
 * so this function makes sure that @chunk->map has at least two extra
 * slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened in between, so map_used couldn't have
	 * grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

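/*
 * Illustrative only: if map block @i is a 512 byte free area and an
 * allocation needs 128 bytes with a 64 byte alignment head, the caller
 * computes tail = 512 - 64 - 128 = 320 and pcpu_split_block(chunk, i,
 * 64, 320) turns the single { 512 } entry into { 64, 128, 320 }; the
 * middle entry is then negated by pcpu_alloc_area() to mark it
 * allocated.
 */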
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

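/*
 * Illustrative only: continuing the example above, freeing the 128
 * byte area in a map of { 64, -128, 320 } flips the entry back to
 * positive and merges it with its free neighbours on both sides,
 * leaving a single { 512 } entry again.
 */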
/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush_tlb: whether to flush tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * The vcache is flushed before unmapping and, if @flush_tlb is true,
 * the tlb is flushed afterwards.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush_tlb)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
			   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush_tlb)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       pcpu_chunk_pagep(chunk, cpu, page_start),
				       page_end - page_start);
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
			pcpu_set_page_chunk(*pagep, chunk);
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	return __addr_to_pcpu_ptr(chunk->vm->addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);
	mutex_unlock(&pcpu_alloc_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
	}
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes, 0 for none
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  It can also return pages
 * after the static area.  NULL return indicates end of pages for the
 * cpu.  Note that @get_page_fn() must return the same number of pages
 * for all cpus.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @dyn_size, if non-negative, determines the number of bytes
 * available for dynamic allocation in the first chunk.  Specifying
 * non-negative value makes percpu leave alone the area beyond
 * @static_size + @reserved_size + @dyn_size.
 *
 * @unit_size specifies unit size and must be aligned to PAGE_SIZE and
 * equal to or larger than @static_size + @reserved_size + @dyn_size
 * (when @dyn_size is non-negative).
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t reserved_size,
				     ssize_t dyn_size, size_t unit_size,
				     void *base_addr)
{
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	size_t size_sum = static_size + reserved_size +
			  (dyn_size >= 0 ? dyn_size : 0);
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned int cpu;
	int i, nr_pages;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	BUG_ON(!base_addr);
	BUG_ON(unit_size < size_sum);
	BUG_ON(unit_size & ~PAGE_MASK);
	BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);

	pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;
	first_vm.addr = base_addr;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->page = schunk->page_ar;
	schunk->immutable = true;

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = static_size + reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->page = schunk->page_ar;	/* share page map with schunk */
		dchunk->immutable = true;

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}

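/*
 * Illustrative only: after pcpu_setup_first_chunk() with a non-zero
 * reserved area, each unit of the first chunk is laid out roughly as
 *
 *	| static | reserved | dynamic |  (unused up to unit_size)
 *
 * The static + reserved part is tracked by the immutable schunk (only
 * reachable via __alloc_reserved_percpu()) while the dynamic part is
 * tracked by dchunk, which is linked into the chunk slots like any
 * other chunk.
 */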
static size_t pcpu_calc_fc_sizes(size_t static_size, size_t reserved_size,
				 ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}

/*
 * Embedding first chunk setup helper.
 */
static void *pcpue_ptr __initdata;
static size_t pcpue_size __initdata;
static size_t pcpue_unit_size __initdata;

static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpue_size)
		return NULL;

	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
}

/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 *
 * This is a helper to ease setting up an embedded first percpu chunk
 * and can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * as a contiguous area using the bootmem allocator and used as-is
 * without being mapped into the vmalloc area.  This enables the first
 * chunk to piggy back on the linear physical mapping which often uses
 * larger page sizes.
 *
 * When @dyn_size is positive, the dynamic area might be larger than
 * specified to fill page alignment.  When @dyn_size is auto,
 * @dyn_size is just big enough to fill page alignment after the static
 * and reserved areas.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned to the bootmem allocator.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
				      ssize_t dyn_size)
{
	size_t chunk_size;
	unsigned int cpu;

	/* determine parameters and allocate */
	pcpue_size = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);

	pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
	chunk_size = pcpue_unit_size * num_possible_cpus();

	pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
					    __pa(MAX_DMA_ADDRESS));
	if (!pcpue_ptr) {
		pr_warning("PERCPU: failed to allocate %zu bytes for "
			   "embedding\n", chunk_size);
		return -ENOMEM;
	}

	/* return the leftover and copy */
	for_each_possible_cpu(cpu) {
		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;

		free_bootmem(__pa(ptr + pcpue_size),
			     pcpue_unit_size - pcpue_size);
		memcpy(ptr, __per_cpu_load, static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);

	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
				      reserved_size, dyn_size,
				      pcpue_unit_size, pcpue_ptr);
}

/*
 * 4k page first chunk setup helper.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_unit_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_unit_pages)
		return pcpu4k_pages[cpu * pcpu4k_unit_pages + pageno];
	return NULL;
}

/**
 * pcpu_4k_first_chunk - map the first chunk using PAGE_SIZE pages
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up a page-mapped first percpu chunk
 * and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_4k_first_chunk(size_t static_size, size_t reserved_size,
				   pcpu_fc_alloc_fn_t alloc_fn,
				   pcpu_fc_free_fn_t free_fn,
				   pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size,
					 PCPU_MIN_UNIT_SIZE));

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_unit_pages * num_possible_cpus() *
			       sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate "
					   "4k page for cpu%u\n", cpu);
				goto enomem;
			}
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * pcpu4k_unit_pages << PAGE_SHIFT;
	vm_area_register_early(&vm, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		unsigned long unit_addr = (unsigned long)vm.addr +
			(cpu * pcpu4k_unit_pages << PAGE_SHIFT);

		for (i = 0; i < pcpu4k_unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		ret = __pcpu_map_pages(unit_addr,
				       &pcpu4k_pages[cpu * pcpu4k_unit_pages],
				       pcpu4k_unit_pages);
		if (ret < 0)
			panic("failed to map percpu area, err=%zd\n", ret);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d 4k pages per cpu, static data %zu bytes\n",
		pcpu4k_unit_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     reserved_size, -1,
				     pcpu4k_unit_pages << PAGE_SHIFT, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pcpu4k_pages[j]), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

/*
 * Large page remapping first chunk setup helper
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pcpul_ent {
	unsigned int	cpu;
	void		*ptr;
};

static size_t pcpul_size;
static size_t pcpul_unit_size;
static struct pcpul_ent *pcpul_map;
static struct vm_struct pcpul_vm;

static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpul_size)
		return NULL;

	return virt_to_page(pcpul_map[cpu].ptr + off);
}

/**
 * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @lpage_size: the size of a large page
 * @alloc_fn: function to allocate percpu lpage, always called with lpage_size
 * @free_fn: function to free percpu memory, @size <= lpage_size
 * @map_fn: function to map percpu lpage, always called with lpage_size
 *
 * This allocator uses large page as unit.  A large page is allocated
 * for each cpu and each is remapped into vmalloc area using large
 * page mapping.  As large page can be quite large, only part of it is
 * used for the first chunk.  Unused part is returned to the bootmem
 * allocator.
 *
 * So, the large pages are mapped twice - once to the physical mapping
 * and to the vmalloc area for the first percpu chunk.  The double
 * mapping does add one more large TLB entry pressure but still is
 * much better than only using 4k mappings while still being NUMA
 * friendly.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */
ssize_t __init pcpu_lpage_first_chunk(size_t static_size, size_t reserved_size,
				      ssize_t dyn_size, size_t lpage_size,
				      pcpu_fc_alloc_fn_t alloc_fn,
				      pcpu_fc_free_fn_t free_fn,
				      pcpu_fc_map_fn_t map_fn)
{
	size_t size_sum;
	size_t map_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	/*
	 * Currently supports only single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);

	pcpul_unit_size = lpage_size;
	pcpul_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
	if (pcpul_size > pcpul_unit_size) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}

	/* allocate pointer array and alloc large pages */
	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
	pcpul_map = alloc_bootmem(map_size);

	for_each_possible_cpu(cpu) {
		void *ptr;

		ptr = alloc_fn(cpu, lpage_size);
		if (!ptr) {
			pr_warning("PERCPU: failed to allocate large page "
				   "for cpu%u\n", cpu);
			goto enomem;
		}

		/*
		 * Only use pcpul_size bytes and give back the rest.
		 *
		 * Ingo: The lpage_size up-rounding bootmem is needed
		 * to make sure the partial lpage is still fully RAM -
		 * it's not well-specified to have an incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_fn(ptr + pcpul_size, lpage_size - pcpul_size);

		pcpul_map[cpu].cpu = cpu;
		pcpul_map[cpu].ptr = ptr;

		memcpy(ptr, __per_cpu_load, static_size);
	}

	/* allocate address and map */
	pcpul_vm.flags = VM_ALLOC;
	pcpul_vm.size = num_possible_cpus() * pcpul_unit_size;
	vm_area_register_early(&pcpul_vm, pcpul_unit_size);

	for_each_possible_cpu(cpu)
		map_fn(pcpul_map[cpu].ptr, pcpul_unit_size,
		       pcpul_vm.addr + cpu * pcpul_unit_size);

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", pcpul_vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
				     reserved_size, dyn_size, pcpul_unit_size,
				     pcpul_vm.addr);

	/* sort pcpul_map array for pcpu_lpage_remapped() */
	for (i = 0; i < num_possible_cpus() - 1; i++)
		for (j = i + 1; j < num_possible_cpus(); j++)
			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
				struct pcpul_ent tmp = pcpul_map[i];
				pcpul_map[i] = pcpul_map[j];
				pcpul_map[j] = tmp;
			}

	return ret;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpul_map[cpu].ptr)
			free_fn(pcpul_map[cpu].ptr, pcpul_size);
	free_bootmem(__pa(pcpul_map), map_size);
	return -ENOMEM;
}

/**
 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
 * @kaddr: the kernel address in question
 *
 * Determine whether @kaddr falls in the pcpul recycled area.  This is
 * used by pageattr to detect VM aliases and break up the pcpu large
 * page mapping such that the same physical page is not mapped under
 * different attributes.
 *
 * The recycled area is always at the tail of a partially used large
 * page.
 *
 * RETURNS:
 * Address of corresponding remapped pcpu address if match is found;
 * otherwise, NULL.
 */
void *pcpu_lpage_remapped(void *kaddr)
{
	unsigned long unit_mask = pcpul_unit_size - 1;
	void *lpage_addr = (void *)((unsigned long)kaddr & ~unit_mask);
	unsigned long offset = (unsigned long)kaddr & unit_mask;
	int left = 0, right = num_possible_cpus() - 1;
	int pos;

	/* pcpul in use at all? */
	if (!pcpul_map)
		return NULL;

	/* okay, perform binary search */
	while (left <= right) {
		pos = (left + right) / 2;

		if (pcpul_map[pos].ptr < lpage_addr)
			left = pos + 1;
		else if (pcpul_map[pos].ptr > lpage_addr)
			right = pos - 1;
		else {
			/* it shouldn't be in the area for the first chunk */
			WARN_ON(offset < pcpul_size);

			return pcpul_vm.addr +
				pcpul_map[pos].cpu * pcpul_unit_size + offset;
		}
	}

	return NULL;
}
#endif

/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	ssize_t unit_size;
	unsigned long delta;
	unsigned int cpu;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	unit_size = pcpu_embed_first_chunk(static_size, PERCPU_MODULE_RESERVE,
					   PERCPU_DYNAMIC_RESERVE);
	if (unit_size < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + cpu * unit_size;
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */