/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>

#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
#endif

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}

/**
 * __percpu_depopulate_mask - depopulate per-cpu data for some CPUs
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for CPUs selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
{
	int cpu;
	for_each_cpu_mask_nr(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}
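
/*
 * Example (illustrative sketch, not part of the original file): the kind
 * of cpu hotplug handler the comments above call for, populating on
 * CPU_UP_PREPARE and depopulating once the cpu is gone.  All example_*
 * names are hypothetical; the CPU_* actions and NOTIFY_* codes come from
 * <linux/cpu.h> and <linux/notifier.h>.
 *
 *	struct example_stats { unsigned long events; };
 *	static void *example_pdata;
 *
 *	static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
 *						  unsigned long action,
 *						  void *hcpu)
 *	{
 *		int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *			if (!percpu_populate(example_pdata,
 *					     sizeof(struct example_stats),
 *					     GFP_KERNEL, cpu))
 *				return NOTIFY_BAD;
 *			break;
 *		case CPU_UP_CANCELED:
 *		case CPU_DEAD:
 *			percpu_depopulate(example_pdata, cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */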

/**
 * __percpu_populate_mask - populate per-cpu data for more CPUs
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for CPUs selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask_nr(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
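
/*
 * Example (sketch, not part of the original file): population is
 * all-or-nothing, so a caller may request a whole mask and rely on the
 * rollback above to undo partial work on failure.  The example_* names
 * are hypothetical.
 *
 *	if (percpu_populate_mask(example_pdata,
 *				 sizeof(struct example_stats),
 *				 GFP_KERNEL, cpu_online_map))
 *		return -ENOMEM;
 */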

/**
 * __alloc_percpu - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @align: alignment
 *
 * Allocate dynamic percpu area. Percpu objects are populated with
 * zeroed buffers.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * We allocate whole cache lines to avoid false sharing
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, GFP_KERNEL);
	void *__pdata = __percpu_disguise(pdata);

	/*
	 * Can't easily make larger alignment work with kmalloc. WARN
	 * on it. Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
					   &cpu_possible_map)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
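
/*
 * Example (sketch, not part of the original file): callers normally use
 * the type-safe alloc_percpu() wrapper from <linux/percpu.h> rather than
 * calling __alloc_percpu() directly, then reach each cpu's zeroed
 * instance through per_cpu_ptr().  The example_* names are hypothetical.
 *
 *	struct example_stats *stats = alloc_percpu(struct example_stats);
 *	int cpu;
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	cpu = get_cpu();
 *	per_cpu_ptr(stats, cpu)->events++;
 *	put_cpu();
 */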

/**
 * free_percpu - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bitmask which per-cpu objects are to be freed.
 */
void free_percpu(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
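
/*
 * Example (sketch, not part of the original file): because of the NULL
 * check above, free_percpu(NULL) is a no-op, so teardown and error paths
 * may free unconditionally.  'stats' is the hypothetical object from the
 * alloc_percpu() sketch above.
 *
 *	free_percpu(stats);
 *	stats = NULL;
 */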