1 /* This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
2    because MTRRs can span up to 40 bits (36 bits on most modern x86) */
3 #include <linux/init.h>
4 #include <linux/slab.h>
6 #include <linux/module.h>
10 #include <asm/system.h>
11 #include <asm/cpufeature.h>
12 #include <asm/processor-flags.h>
13 #include <asm/tlbflush.h>
18 struct mtrr_var_range var_ranges[MAX_VAR_RANGES];
19 mtrr_type fixed_ranges[NUM_FIXED_RANGES];
20 unsigned char enabled;
21 unsigned char have_fixed;
25 struct fixed_range_block {
26 int base_msr; /* start address of an MTRR block */
27 int ranges; /* number of MTRRs in this block */
30 static struct fixed_range_block fixed_range_blocks[] = {
31 { MTRRfix64K_00000_MSR, 1 }, /* one 64k MTRR */
32 { MTRRfix16K_80000_MSR, 2 }, /* two 16k MTRRs */
33 { MTRRfix4K_C0000_MSR, 8 }, /* eight 4k MTRRs */
37 static unsigned long smp_changes_mask;
38 static struct mtrr_state mtrr_state = {};
39 static int mtrr_state_set;
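/*
 * A minimal illustrative sketch (not part of the driver): the blocks in
 * fixed_range_blocks[] above pack eight one-byte memory types per MSR,
 * giving (1 + 2 + 8) * 8 = 88 fixed-range entries that tile the first
 * megabyte: 8 x 64K (0x00000-0x7FFFF), 16 x 16K (0x80000-0xBFFFF) and
 * 64 x 4K (0xC0000-0xFFFFF). The hypothetical helper below shows the
 * resulting index arithmetic; it is not used anywhere.
 */
static inline int fixed_range_index_example(unsigned int addr)
{
	if (addr < 0x80000)			/* 64K granularity, entries 0-7 */
		return addr >> 16;
	if (addr < 0xC0000)			/* 16K granularity, entries 8-23 */
		return 8 + ((addr - 0x80000) >> 14);
	return 24 + ((addr - 0xC0000) >> 12);	/* 4K granularity, entries 24-87 */
}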
41 #undef MODULE_PARAM_PREFIX
42 #define MODULE_PARAM_PREFIX "mtrr."
45 module_param_named(show, mtrr_show, bool, 0);
48 * Returns the effective MTRR type for the region
50 * - 0xFE - when the range is "not entirely covered" by _any_ var range MTRR
51 * - 0xFF - when MTRR is not enabled
53 u8 mtrr_type_lookup(u64 start, u64 end)
57 u8 prev_match, curr_match;
62 if (!mtrr_state.enabled)
65 /* Make end inclusive instead of exclusive */
68 /* Look in fixed ranges. Just return the type as per start */
69 if (mtrr_state.have_fixed && (start < 0x100000)) {
72 if (start < 0x80000) {
75 return mtrr_state.fixed_ranges[idx];
76 } else if (start < 0xC0000) {
78 idx += ((start - 0x80000) >> 14);
79 return mtrr_state.fixed_ranges[idx];
80 } else if (start < 0x100000) {
82 idx += ((start - 0xC0000) >> 12);
83 return mtrr_state.fixed_ranges[idx];
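		/*
		 * Worked example of the indexing above: a lookup at
		 * 0x9C000 lands in the 16K block, so
		 * idx = 8 + ((0x9C000 - 0x80000) >> 14) = 8 + 7 = 15,
		 * the slice covering 0x9C000-0x9FFFF.
		 */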
88 * Look in variable ranges
89 * Look for multiple ranges matching this address and pick the type
90 * as per MTRR precedence
92 if (!(mtrr_state.enabled & 2)) {
93 return mtrr_state.def_type;
97 for (i = 0; i < num_var_ranges; ++i) {
98 unsigned short start_state, end_state;
100 if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
103 base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
104 (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
105 mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
106 (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
108 start_state = ((start & mask) == (base & mask));
109 end_state = ((end & mask) == (base & mask));
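		/*
		 * Matching rule, illustrated: a variable MTRR covers addr
		 * iff (addr & mask) == (base & mask). With base =
		 * 0x80000000 and mask = 0xFFC00000 (a 4 MiB region),
		 * 0x80200000 matches while 0x80400000 does not;
		 * start_state != end_state means the queried range
		 * straddles this MTRR's boundary.
		 */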
110 if (start_state != end_state)
113 if ((start & mask) != (base & mask)) {
117 curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
118 if (prev_match == 0xFF) {
119 prev_match = curr_match;
123 if (prev_match == MTRR_TYPE_UNCACHABLE ||
124 curr_match == MTRR_TYPE_UNCACHABLE) {
125 return MTRR_TYPE_UNCACHABLE;
128 if ((prev_match == MTRR_TYPE_WRBACK &&
129 curr_match == MTRR_TYPE_WRTHROUGH) ||
130 (prev_match == MTRR_TYPE_WRTHROUGH &&
131 curr_match == MTRR_TYPE_WRBACK)) {
132 prev_match = MTRR_TYPE_WRTHROUGH;
133 curr_match = MTRR_TYPE_WRTHROUGH;
136 if (prev_match != curr_match) {
137 return MTRR_TYPE_UNCACHABLE;
141 if (prev_match != 0xFF)
144 return mtrr_state.def_type;
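/*
 * Precedence walk-through for the loop above: overlapping WRBACK and
 * WRTHROUGH ranges degrade to WRTHROUGH, any overlap involving
 * UNCACHABLE yields UNCACHABLE, and every other disagreement is
 * conservatively treated as UNCACHABLE as well.
 */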
147 /* Get the MSR pair relating to a var range */
149 get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
151 rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
152 rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
156 get_fixed_ranges(mtrr_type * frs)
158 unsigned int *p = (unsigned int *) frs;
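	/*
	 * Layout sketch: frs is read as 32-bit words, two per MSR --
	 * p[0..1] from the one 64K MSR, p[2..5] from the two 16K MSRs
	 * and p[6..21] from the eight 4K MSRs; 22 words in all, i.e. the
	 * 88 one-byte types of the fixed-range table.
	 */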
161 rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
163 for (i = 0; i < 2; i++)
164 rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
165 for (i = 0; i < 8; i++)
166 rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
169 void mtrr_save_fixed_ranges(void *info)
172 get_fixed_ranges(mtrr_state.fixed_ranges);
175 static void print_fixed(unsigned base, unsigned step, const mtrr_type *types)
179 for (i = 0; i < 8; ++i, ++types, base += step)
180 printk(KERN_INFO "MTRR %05X-%05X %s\n",
181 base, base + step - 1, mtrr_attrib_to_str(*types));
184 static void prepare_set(void);
185 static void post_set(void);
187 /* Grab all of the MTRR state for this CPU into *state */
188 void __init get_mtrr_state(void)
191 struct mtrr_var_range *vrs;
195 vrs = mtrr_state.var_ranges;
197 rdmsr(MTRRcap_MSR, lo, dummy);
198 mtrr_state.have_fixed = (lo >> 8) & 1;
200 for (i = 0; i < num_var_ranges; i++)
201 get_mtrr_var_range(i, &vrs[i]);
202 if (mtrr_state.have_fixed)
203 get_fixed_ranges(mtrr_state.fixed_ranges);
205 rdmsr(MTRRdefType_MSR, lo, dummy);
206 mtrr_state.def_type = (lo & 0xff);
207 mtrr_state.enabled = (lo & 0xc00) >> 10;
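	/*
	 * MTRRdefType layout, for reference: bits 7:0 hold the default
	 * type, bit 10 is FE (fixed-range enable) and bit 11 is E (MTRR
	 * enable). After the shift, bit 0 of mtrr_state.enabled tracks FE
	 * and bit 1 tracks E, which is why the code tests "enabled & 1"
	 * for fixed ranges and "enabled & 2" for variable ranges.
	 */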
212 printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
213 if (mtrr_state.have_fixed) {
214 printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
215 mtrr_state.enabled & 1 ? "en" : "dis");
216 print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
217 for (i = 0; i < 2; ++i)
218 print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
219 for (i = 0; i < 8; ++i)
220 print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
222 printk(KERN_INFO "MTRR variable ranges %sabled:\n",
223 mtrr_state.enabled & 2 ? "en" : "dis");
224 high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
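		/*
		 * Worked example, assuming 36-bit physical addressing
		 * (size_or_mask = 0xFF000000): ffs(size_or_mask) - 1 = 24
		 * significant page-frame bits; the low 20 print in the
		 * "%05X000" field, and (24 - 20 + 3) / 4 rounds the
		 * remaining 4 bits up to one hex digit for the high word.
		 */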
225 for (i = 0; i < num_var_ranges; ++i) {
226 if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
227 printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
230 mtrr_state.var_ranges[i].base_hi,
231 mtrr_state.var_ranges[i].base_lo >> 12,
233 mtrr_state.var_ranges[i].mask_hi,
234 mtrr_state.var_ranges[i].mask_lo >> 12,
235 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
237 printk(KERN_INFO "MTRR %u disabled\n", i);
242 /* PAT setup for the boot processor (BP). We need to go through the sync steps here */
243 local_irq_save(flags);
249 local_irq_restore(flags);
253 /* Some BIOSes are broken and don't set all MTRRs the same! */
254 void __init mtrr_state_warn(void)
256 unsigned long mask = smp_changes_mask;
260 if (mask & MTRR_CHANGE_MASK_FIXED)
261 printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
262 if (mask & MTRR_CHANGE_MASK_VARIABLE)
263 printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
264 if (mask & MTRR_CHANGE_MASK_DEFTYPE)
265 printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
266 printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
267 printk(KERN_INFO "mtrr: corrected configuration.\n");
270 /* Doesn't attempt to pass an error out to MTRR users
271 because it's quite complicated in some cases and probably not
272 worth it; the best error handling here is simply to ignore the error. */
273 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
275 if (wrmsr_safe(msr, a, b) < 0)
277 "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
278 smp_processor_id(), msr, a, b);
282 * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
283 * see AMD publication no. 24593, chapter 3.2.1 for more information
285 static inline void k8_enable_fixed_iorrs(void)
289 rdmsr(MSR_K8_SYSCFG, lo, hi);
290 mtrr_wrmsr(MSR_K8_SYSCFG, lo
291 | K8_MTRRFIXRANGE_DRAM_ENABLE
292 | K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
296 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
297 * @msr: MSR address of the MTRR which should be checked and updated
298 * @changed: pointer which indicates whether the MTRR needed to be changed
299 * @msrwords: pointer to the MSR values which the MSR should have
301 * If K8 extensions are wanted, update the K8 SYSCFG MSR also.
302 * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
304 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
310 if (lo != msrwords[0] || hi != msrwords[1]) {
311 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
312 boot_cpu_data.x86 == 15 &&
313 ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
314 k8_enable_fixed_iorrs();
315 mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
321 * generic_get_free_region - Get a free MTRR.
322 * @base: The starting (base) address of the region.
323 * @size: The size (in bytes) of the region.
324 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
326 * Returns: The index of the region on success, else negative on error.
328 int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
332 unsigned long lbase, lsize;
334 max = num_var_ranges;
335 if (replace_reg >= 0 && replace_reg < max)
337 for (i = 0; i < max; ++i) {
338 mtrr_if->get(i, &lbase, &lsize, &ltype);
345 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
346 unsigned long *size, mtrr_type *type)
348 unsigned int mask_lo, mask_hi, base_lo, base_hi;
350 rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
351 if ((mask_lo & 0x800) == 0) {
352 /* Invalid (i.e. free) range */
359 rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
361 /* Work out the shifted address mask. */
362 mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
363 | mask_lo >> PAGE_SHIFT;
365 /* This works correctly if size is a power of two, i.e. a contiguous range. */
368 *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
369 *type = base_lo & 0xff;
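	/*
	 * Illustrative decode, again assuming 36-bit physical addresses:
	 * for a 4 MiB region the mask MSR reads back as mask_hi = 0xF,
	 * mask_lo = 0xFFC00800, and the expression above yields the
	 * page-granular mask 0xFF000000 | (0xF << 20) | (0xFFC00800 >> 12)
	 * = 0xFFFFFC00, whose two's complement is the region size of
	 * 0x400 pages.
	 */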
373 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set
374 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
376 static int set_fixed_ranges(mtrr_type * frs)
378 unsigned long long *saved = (unsigned long long *) frs;
379 bool changed = false;
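	/*
	 * Walk sketch: the loop below preincrements block (so block is
	 * assumed to start at -1) and advances saved by one 64-bit MSR
	 * image per range -- 1 + 2 + 8 = 11 steps in total, mirroring
	 * the read order in get_fixed_ranges().
	 */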
382 while (fixed_range_blocks[++block].ranges)
383 for (range = 0; range < fixed_range_blocks[block].ranges; range++)
384 set_fixed_range(fixed_range_blocks[block].base_msr + range,
385 &changed, (unsigned int *) saved++);
390 /* Set the MSR pair relating to a var range. Returns TRUE if changes are made. */
392 static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
395 bool changed = false;
397 rdmsr(MTRRphysBase_MSR(index), lo, hi);
398 if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
399 || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
400 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
401 mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
405 rdmsr(MTRRphysMask_MSR(index), lo, hi);
407 if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
408 || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
409 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
410 mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
416 static u32 deftype_lo, deftype_hi;
419 * set_mtrr_state - Set the MTRR state for this CPU.
421 * NOTE: The CPU must already be in a safe state for MTRR changes.
422 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
424 static unsigned long set_mtrr_state(void)
427 unsigned long change_mask = 0;
429 for (i = 0; i < num_var_ranges; i++)
430 if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
431 change_mask |= MTRR_CHANGE_MASK_VARIABLE;
433 if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
434 change_mask |= MTRR_CHANGE_MASK_FIXED;
436 /* set_mtrr_restore restores the old value of MTRRdefType,
437    so to change it we fiddle with the saved value: */
438 if ((deftype_lo & 0xff) != mtrr_state.def_type
439 || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
440 deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
441 change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
448 static unsigned long cr4 = 0;
449 static DEFINE_SPINLOCK(set_atomicity_lock);
452 * Since we are disabling the cache, don't allow any interrupts; they
453 * would run extremely slowly and would only increase the pain. The caller must
454 * ensure that local interrupts are disabled and are reenabled after post_set()
458 static void prepare_set(void) __acquires(set_atomicity_lock)
462 /* Note that this is not ideal, since the cache is only flushed/disabled
463 for this CPU while the MTRRs are changed, but changing this requires
464 more invasive changes to the way the kernel boots */
466 spin_lock(&set_atomicity_lock);
468 /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
469 cr0 = read_cr0() | X86_CR0_CD;
473 /* Save value of CR4 and clear Page Global Enable (bit 7) */
476 write_cr4(cr4 & ~X86_CR4_PGE);
479 /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
482 /* Save MTRR state */
483 rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
485 /* Disable MTRRs, and set the default type to uncached */
486 mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
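	/*
	 * Note on the mask: ~0xcff clears the default-type byte (0xff)
	 * and both enable bits (FE, bit 10 and E, bit 11), so every
	 * access is handled as uncached until post_set() restores
	 * MTRRdefType.
	 */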
489 static void post_set(void) __releases(set_atomicity_lock)
491 /* Flush TLBs (no need to flush caches - they are disabled) */
494 /* Intel (P6) standard MTRRs */
495 mtrr_wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
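	/* Enable caches again by clearing CR0.CD (0xbfffffff == ~X86_CR0_CD) */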
498 write_cr0(read_cr0() & 0xbfffffff);
500 /* Restore value of CR4 */
503 spin_unlock(&set_atomicity_lock);
506 static void generic_set_all(void)
508 unsigned long mask, count;
511 local_irq_save(flags);
514 /* Actually set the state */
515 mask = set_mtrr_state();
521 local_irq_restore(flags);
523 /* Use the atomic bitops to update the global mask */
524 for (count = 0; count < sizeof mask * 8; ++count) {
526 set_bit(count, &smp_changes_mask);
532 static void generic_set_mtrr(unsigned int reg, unsigned long base,
533 unsigned long size, mtrr_type type)
534 /* [SUMMARY] Set variable MTRR register on the local CPU.
535 <reg> The register to set.
536 <base> The base address of the region.
537 <size> The size of the region. If this is 0 the region is disabled.
538 <type> The type of the region.
543 struct mtrr_var_range *vr;
545 vr = &mtrr_state.var_ranges[reg];
547 local_irq_save(flags);
551 /* The invalid bit is kept in the mask, so we simply clear the
552 relevant mask register to disable a range. */
553 mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
554 memset(vr, 0, sizeof(struct mtrr_var_range));
556 vr->base_lo = base << PAGE_SHIFT | type;
557 vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
558 vr->mask_lo = -size << PAGE_SHIFT | 0x800;
559 vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
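		/*
		 * Mask arithmetic sketch: base and size are in pages. For
		 * a 4 MiB region, size = 0x400, so on a 32-bit host
		 * -size << PAGE_SHIFT = 0xFFC00000 and mask_lo becomes
		 * 0xFFC00800 once the valid bit (0x800) is ORed in --
		 * the exact inverse of the decode in generic_get_mtrr().
		 */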
561 mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
562 mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
566 local_irq_restore(flags);
569 int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
571 unsigned long lbase, last;
573 /* For Intel PPro stepping <= 7, must be 4 MiB aligned
574 and not touch 0x70000000->0x7003FFFF */
575 if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
576 boot_cpu_data.x86_model == 1 &&
577 boot_cpu_data.x86_mask <= 7) {
578 if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
579 printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
582 if (!(base + size < 0x70000 || base > 0x7003F) &&
583 (type == MTRR_TYPE_WRCOMB
584 || type == MTRR_TYPE_WRBACK)) {
585 printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
590 /* Check upper bits of base and last are equal and lower bits are 0
591 for base and 1 for last */
592 last = base + size - 1;
593 for (lbase = base; !(lbase & 1) && (last & 1);
594 lbase = lbase >> 1, last = last >> 1) ;
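	/*
	 * Worked example for the shift loop: base = 0x400 and
	 * size = 0x400 (in pages) give last = 0x7FF; ten shifts strip
	 * the common power-of-two factor, ending with lbase == last == 1,
	 * so the range is accepted. base = 0x600 with the same size
	 * stops at lbase = 3, last = 4 and triggers the warning below.
	 */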
596 printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
604 static int generic_have_wrcomb(void)
606 unsigned long config, dummy;
607 rdmsr(MTRRcap_MSR, config, dummy);
608 return (config & (1 << 10));
611 int positive_have_wrcomb(void)
616 /* generic structure... */
618 struct mtrr_ops generic_mtrr_ops = {
620 .set_all = generic_set_all,
621 .get = generic_get_mtrr,
622 .get_free_region = generic_get_free_region,
623 .set = generic_set_mtrr,
624 .validate_add_page = generic_validate_add_page,
625 .have_wrcomb = generic_have_wrcomb,