/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};


enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
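
/*
 * Worked example (illustrative values, not from any specific part) of
 * decoding one cpuid(4) subleaf into a cache size, using the same
 * arithmetic cpuid4_cache_lookup_regs() performs further down:
 *
 *	ebx.split.coherency_line_size     = 63   -> 64-byte lines
 *	ebx.split.physical_line_partition = 0    -> 1 partition
 *	ebx.split.ways_of_associativity   = 7    -> 8 ways
 *	ecx.split.number_of_sets          = 4095 -> 4096 sets
 *
 *	size = 4096 * 64 * 1 * 8 = 2097152 bytes = 2 MB
 *
 * All four fields are stored minus one by the hardware, hence the
 * "+ 1" terms in the size computation.
 */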

struct amd_l3_cache {
	struct pci_dev *dev;
	bool can_disable;
	unsigned indices;
	u8 subcaches[4];
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_l3_cache *l3;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_l3_cache *l3;
};

unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

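/*
 * Worked example (made-up register value): cpuid(0x80000006) returns the
 * L2 geometry in ECX, laid out as union l2_cache above.
 *
 *	ecx = 0x02006140
 *	  line_size     = 0x40   -> 64-byte lines
 *	  lines_per_tag = 0x1
 *	  assoc         = 0x6    -> assocs[6] == 8, i.e. 8-way
 *	  size_in_kb    = 0x0200 -> 512 KB
 *
 * amd_cpuid4() below repacks exactly these fields into the Intel
 * CPUID4-style eax/ebx/ecx unions.
 */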
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#ifdef CONFIG_CPU_SUP_AMD

/*
 * L3 cache descriptors
 */
static struct amd_l3_cache **__cpuinitdata l3_caches;

static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(l3->dev, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));
	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}
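
/*
 * Worked example for amd_calc_l3_indices() (hypothetical register value):
 * if the 0x1C4 read returns val = 0x00000010, only BIT(4) is set, so
 *
 *	sc0 = !(val & BIT(0))                     = 1
 *	sc1 = !(val & BIT(4))                     = 0  (subcache 1 disabled)
 *	sc2 = !(val & BIT(8))  + !(val & BIT(9))  = 2
 *	sc3 = !(val & BIT(12)) + !(val & BIT(13)) = 2
 *
 *	indices = (max(1, 0, 2, 2) << 10) - 1 = (2 << 10) - 1 = 2047
 */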

static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
{
	struct amd_l3_cache *l3;
	struct pci_dev *dev = node_to_k8_nb_misc(node);

	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
	if (!l3) {
		printk(KERN_WARNING "Error allocating L3 struct\n");
		return NULL;
	}

	l3->dev = dev;

	amd_calc_l3_indices(l3);

	return l3;
}

static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	int node;

	if (boot_cpu_data.x86 != 0x10)
		return;

	if (index < 3)
		return;

	/* see errata #382 and #388 */
	if (boot_cpu_data.x86_model < 0x8)
		return;

	if ((boot_cpu_data.x86_model == 0x8 ||
	     boot_cpu_data.x86_model == 0x9) &&
	     boot_cpu_data.x86_mask < 0x1)
		return;

	/* not in virtualized environments */
	if (num_k8_northbridges == 0)
		return;

	/*
	 * Strictly speaking, the amount in @size below is leaked since it is
	 * never freed but this is done only on shutdown so it doesn't matter.
	 */
	if (!l3_caches) {
		int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);

		l3_caches = kzalloc(size, GFP_ATOMIC);
		if (!l3_caches)
			return;
	}

	node = amd_get_nb_id(smp_processor_id());

	if (!l3_caches[node]) {
		l3_caches[node] = amd_init_l3_cache(node);
		if (l3_caches[node])
			l3_caches[node]->can_disable = true;
	}

	/* don't dereference a node whose L3 setup failed */
	if (WARN_ON(!l3_caches[node]))
		return;

	this_leaf->l3 = l3_caches[node];
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	struct pci_dev *dev;
	unsigned int reg = 0;

	/* check ->l3 before dereferencing it for ->dev */
	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
		return -EINVAL;

	dev = this_leaf->l3->dev;
	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "0x%08x\n", reg);
}

#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
	const char *buf, size_t count, unsigned int index)
{
	struct pci_dev *dev;
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	unsigned long val = 0;

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

	/* check ->l3 before dereferencing it for ->dev */
	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = this_leaf->l3->dev;
	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* do not allow writes outside of allowed bits */
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
		return -EINVAL;

	val |= BIT(30);
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	/*
	 * We need to WBINVD on a core on the node containing the L3 cache which
	 * indices we disable therefore a simple wbinvd() is not sufficient.
	 */
	wbinvd_on_cpu(cpu);
	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
	return count;
}
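
/*
 * Usage sketch (the sysfs path assumes the L3 leaf is index3, as on
 * family 0x10; the value written is just an example index within
 * SUBCACHE_INDEX):
 *
 *	# echo 42 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *	# cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 * The store path above ORs in BIT(30), writes the PCI config register,
 * flushes via wbinvd_on_cpu() on a core of the owning node, then commits
 * the disable with BIT(31).
 */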

#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

#else	/* CONFIG_CPU_SUP_AMD */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
}
#endif /* CONFIG_CPU_SUP_AMD */

static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
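
/*
 * Example: a part reporting L1d, L1i, L2 and L3 answers subleaves 0-3
 * with valid cache types and subleaf 4 with CACHE_TYPE_NULL, so the loop
 * above returns 4 and cpuid(4) is later queried with indices 0 through 3.
 */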

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
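
/*
 * Worked example for the cpuid(2) descriptor path above (illustrative
 * register contents): if cpuid(2) returns EAX = 0x665b5001, byte 0 (0x01)
 * is the iteration count, and the remaining bytes 0x50, 0x5b and 0x66 are
 * looked up as descriptors. Of these only 0x66 appears in cache_table,
 * contributing an 8 KB L1 data cache; the others are TLB descriptors,
 * which this file deliberately ignores.
 */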

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif
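
/*
 * Worked example for cache_shared_cpu_map_setup() above (hypothetical
 * topology): with num_threads_sharing == 2, get_count_order(2) == 1, so
 * CPUs whose APIC IDs differ only in bit 0 (e.g. apicid 4 and 5, both
 * giving apicid >> 1 == 2) land in the same shared_cpu_map for the leaf.
 */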

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;

		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
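
/*
 * For reference, show_one_plus(level, eax.split.level, 0) expands to a
 * show_level() helper that prints e.g. "3\n" for an L3 leaf; the "+ 1"
 * variants undo the minus-one encoding CPUID uses for line size, ways,
 * partitions and sets (see the worked size example near the top).
 */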

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
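
/*
 * The two wrappers above differ only in output format: for a leaf shared
 * by CPUs 0-3, shared_cpu_map prints a hex mask (e.g. "0000000f") while
 * shared_cpu_list prints a range list ("0-3").
 */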

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

#define DEFAULT_SYSFS_CACHE_ATTRS	\
	&type.attr,			\
	&level.attr,			\
	&coherency_line_size.attr,	\
	&physical_line_partition.attr,	\
	&ways_of_associativity.attr,	\
	&number_of_sets.attr,		\
	&size.attr,			\
	&shared_cpu_map.attr,		\
	&shared_cpu_list.attr

static struct attribute *default_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
	NULL
};

static struct attribute *default_l3_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
#ifdef CONFIG_CPU_SUP_AMD
	&cache_disable_0.attr,
	&cache_disable_1.attr,
#endif
	NULL
};

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		if (this_leaf->l3 && this_leaf->l3->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
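
/*
 * Resulting sysfs layout for a CPU reporting four leaves:
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/{type,level,size,...}
 *	/sys/devices/system/cpu/cpu0/cache/index1/...
 *	/sys/devices/system/cpu/cpu0/cache/index2/...
 *	/sys/devices/system/cpu/cpu0/cache/index3/...
 *
 * with cache_disable_0/1 appearing only on a leaf whose AMD L3 has
 * can_disable set.
 */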

static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif