/* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86). */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/processor-flags.h>
#include <asm/tlbflush.h>
#include <asm/pat.h>
#include "mtrr.h"

struct fixed_range_block {
	int base_msr;	/* start address of an MTRR block */
	int ranges;	/* number of MTRRs in this block */
};

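/*
 * Together these blocks describe all eleven fixed-range MTRR MSRs:
 * the single 64K MSR covers 0x00000-0x7FFFF in eight 64K slots, the
 * two 16K MSRs cover 0x80000-0xBFFFF and the eight 4K MSRs cover
 * 0xC0000-0xFFFFF, i.e. the whole first megabyte, one type byte per slot.
 */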
static struct fixed_range_block fixed_range_blocks[] = {
	{ MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR */
	{ MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */
	{ MTRRfix4K_C0000_MSR, 8 },  /* eight 4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state = {};
EXPORT_SYMBOL_GPL(mtrr_state);

static int __initdata mtrr_show;
static int __init mtrr_debug(char *opt)
{
	mtrr_show = 1;
	return 0;
}
early_param("mtrr.show", mtrr_debug);

/*
 * Returns the effective MTRR type for the region.
 * Error returns:
 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
 * - 0xFF - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	if (!mtrr_state_set)
		return 0xFF;

	if (!mtrr_state.enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state.have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}
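
	/*
	 * Index math above: fixed_ranges[] holds 88 type bytes laid out
	 * MSR by MSR.  Slots 0-7 are the 64K ranges (64K steps, hence
	 * start >> 16), slots 8-23 the 16K ranges (>> 14) and slots
	 * 24-87 the 4K ranges (>> 12).
	 */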

	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence.
	 */
	if (!(mtrr_state.enabled & 2)) {
		return mtrr_state.def_type;
	}

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
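
		/*
		 * An address falls in this MTRR iff (addr & mask) ==
		 * (base & mask); checking start and end separately catches
		 * ranges that only partially overlap the MTRR.
		 */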

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask)) {
			continue;
		}

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE) {
			return MTRR_TYPE_UNCACHABLE;
		}

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match) {
			return MTRR_TYPE_UNCACHABLE;
		}
	}
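
	/*
	 * The overlap rules above follow the Intel SDM: UNCACHABLE wins
	 * over everything, WRBACK and WRTHROUGH combine to WRTHROUGH, and
	 * any other conflicting pair is left undefined by the SDM, so it
	 * is treated as UNCACHABLE here to stay safe.
	 */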
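	/*
	 * AMD TOM2: on CPUs where amd_special_default_mtrr() holds, memory
	 * between 4GB and the TOP_MEM2 boundary is write-back without
	 * needing a variable MTRR to say so.
	 */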
	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state.def_type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

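/*
 * Read all eleven fixed-range MSRs into *frs, treating it as an array
 * of 22 32-bit words: p[0..1] hold the 64K MSR, p[2..5] the two 16K
 * MSRs and p[6..21] the eight 4K MSRs.
 */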
static void
get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step)
		printk(KERN_INFO "MTRR %05X-%05X %s\n",
			base, base + step - 1, mtrr_attrib_to_str(*types));
}

static void prepare_set(void);
static void post_set(void);

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;
	unsigned long flags;

	vrs = mtrr_state.var_ranges;

	rdmsr(MTRRcap_MSR, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;
		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	if (mtrr_show) {
		int high_width;

		printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
		if (mtrr_state.have_fixed) {
			printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
			       mtrr_state.enabled & 1 ? "en" : "dis");
			print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
			for (i = 0; i < 2; ++i)
				print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
			for (i = 0; i < 8; ++i)
				print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
		}
		printk(KERN_INFO "MTRR variable ranges %sabled:\n",
		       mtrr_state.enabled & 2 ? "en" : "dis");
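		/*
		 * high_width: number of hex digits needed for the physical
		 * address bits above bit 31; size_or_mask has 1s in every
		 * bit beyond the CPU's physical address width, so its
		 * lowest set bit encodes that width.
		 */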
		high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
		for (i = 0; i < num_var_ranges; ++i) {
			if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
				printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				       i,
				       high_width,
				       mtrr_state.var_ranges[i].base_hi,
				       mtrr_state.var_ranges[i].base_lo >> 12,
				       high_width,
				       mtrr_state.var_ranges[i].mask_hi,
				       mtrr_state.var_ranges[i].mask_lo >> 12,
				       mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
			else
				printk(KERN_INFO "MTRR %u disabled\n", i);
		}
		if (mtrr_tom2) {
			printk(KERN_INFO "TOM2: %016llx aka %lldM\n",
			       mtrr_tom2, mtrr_tom2>>20);
		}
	}
	mtrr_state_set = 1;

	/* PAT setup for the BP. We need to go through the same sync steps
	   (cache off, TLB flush) here as for an MTRR change. */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/* Doesn't attempt to pass an error out to MTRR users
   because it's quite complicated in some cases and probably not
   worth it; the best error handling is simply to ignore it. */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0)
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
}

/**
 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs;
 * see AMD publication no. 24593, chapter 3.2.1 for more information
 */
static inline void k8_enable_fixed_iorrs(void)
{
	unsigned lo, hi;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	mtrr_wrmsr(MSR_K8_SYSCFG, lo
				| K8_MTRRFIXRANGE_DRAM_ENABLE
				| K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
}
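
/*
 * The two bits set above are, per AMD's documentation: MtrrFixDramEn,
 * which makes the extended RdMem/WrMem fixed-range attributes take
 * effect, and MtrrFixDramModEn, which exposes those bits for read/write
 * through the fixed-range MTRR MSRs.
 */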

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 *
 * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
		    ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
			k8_enable_fixed_iorrs();
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	int i, max;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;
	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;
	unsigned int tmp, hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask_lo = size_or_mask | tmp;
	/* Expand tmp with high bits set to all 1s */
	hi = fls(tmp);
	if (hi > 0) {
		tmp |= ~((1<<(hi - 1)) - 1);

		if (tmp != mask_lo) {
			WARN_ONCE(1, KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
			mask_lo = tmp;
		}
	}

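	/*
	 * With mask_lo now of the form 111...1000...0 (in page units),
	 * two's-complement negation gives the range size directly:
	 * -mask == ~mask + 1, i.e. exactly the number of pages covered.
	 */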
	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *) frs;
	bool changed = false;
	int block = -1, range;

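	/*
	 * Walk fixed_range_blocks[]; each fixed-range MSR holds eight
	 * one-byte types, so stepping a 64-bit pointer through *frs
	 * hands exactly one MSR's worth of state to each call.
	 */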
	while (fixed_range_blocks[++block].ranges)
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *) saved++);

	return changed;
}

/* Set the MSR pair relating to a var range. Returns true if
   changes are made */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = true;
	}

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
	       (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

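	/*
	 * In MTRRdefType, bits 0-7 hold the default type, bit 10 the
	 * fixed-range enable and bit 11 the global MTRR enable; 0xcff
	 * below masks exactly those fields.
	 */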
	/* Set_mtrr_restore restores the old value of MTRRdefType,
	   so to set it we fiddle with the saved value */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}


static unsigned long cr4 = 0;
static DEFINE_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache, don't allow any interrupts - they
 * would run extremely slowly and would only increase the pain. The caller
 * must ensure that local interrupts are disabled and are re-enabled after
 * post_set() has been called.
 */

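/*
 * The sequence in prepare_set()/post_set() follows the MTRR update
 * recipe from the Intel SDM: enter no-fill cache mode, flush caches and
 * TLBs, disable MTRRs, make the change, then undo each step in reverse
 * order.
 */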
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);
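	/* 0xbfffffff above is ~X86_CR0_CD: clearing CD (bit 30) re-enables
	   cache fills; NW was never set by prepare_set(), so it is left
	   alone here. */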

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
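		/*
		 * base and size are in pages here. For a power-of-two
		 * size, -size yields a mask of all 1s above the size
		 * boundary; bit 11 (0x800) of mask_lo is the valid bit.
		 */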
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
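	/*
	 * The loop shifts out trailing 0s of base together with matching
	 * trailing 1s of last; only for a region whose size is a power of
	 * two and whose base is size-aligned do both collapse to the same
	 * value.
	 */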
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}

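/* MTRRcap bit 10 (WC) reports whether write-combining is supported. */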
static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* generic structure...
 */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if = 1,
	.set_all = generic_set_all,
	.get = generic_get_mtrr,
	.get_free_region = generic_get_free_region,
	.set = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb = generic_have_wrcomb,
};