return NULL;
}
- prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
- | _PAGE_ACCESSED | flags);
+ prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
/*
 * Mappings have to be page-aligned
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
unsigned long last_addr;
- void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
+ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
if (!p)
return p;
}
EXPORT_SYMBOL(iounmap);
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+
+int __initdata early_ioremap_debug;
+
+static int __init early_ioremap_debug_setup(char *str)
+{
+ early_ioremap_debug = 1;
+
+ return 0;
+}
+early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
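+/*
+ * bm_pte is a single, statically allocated, page-aligned page table
+ * (1024 32-bit entries) that backs the FIX_BTMAP fixmap slots before
+ * the real kernel page tables exist.
+ */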
+static __initdata int after_paging_init;
+static __initdata unsigned long bm_pte[1024]
+ __attribute__((aligned(PAGE_SIZE)));
+
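+/*
+ * With two-level 32-bit paging, bits 31..22 of a virtual address
+ * index the pgd and bits 21..12 index the page table, hence the
+ * >> 22 shift and the 1023 (0x3ff) masks below.
+ */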
+static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
+{
+ return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
+}
+
+static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
+{
+ return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
+}
+
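+/*
+ * Hook bm_pte into the pgd entry covering the FIX_BTMAP range, so
+ * that early_set_fixmap() can install boot-time mappings before the
+ * permanent kernel page tables are up.
+ */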
+void __init early_ioremap_init(void)
+{
+ unsigned long *pgd;
+
+ if (early_ioremap_debug)
+ printk("early_ioremap_init()\n");
+
+ pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+ /* Clear the table before hooking it into the pgd: */
+ memset(bm_pte, 0, sizeof(bm_pte));
+ *pgd = __pa(bm_pte) | _PAGE_TABLE;
+ /*
+ * The boot-ioremap range spans multiple pgds, for which
+ * we are not prepared:
+ */
+ if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
+ WARN_ON(1);
+ printk("pgd %p != %p\n",
+ pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
+ printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ fix_to_virt(FIX_BTMAP_BEGIN));
+ printk("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ fix_to_virt(FIX_BTMAP_END));
+
+ printk("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ printk("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+ }
+}
+
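+/*
+ * Unhook bm_pte from the pgd and flush the TLB, invalidating all
+ * boot-time fixmap mappings in one go.
+ */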
+void __init early_ioremap_clear(void)
+{
+ unsigned long *pgd;
+
+ if (early_ioremap_debug)
+ printk("early_ioremap_clear()\n");
+
+ pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+ *pgd = 0;
+ __flush_tlb_all();
+}
+
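+/*
+ * Once the permanent page tables are live, replay every boot-time
+ * mapping still present in bm_pte via set_fixmap(), so that it
+ * survives the switch.
+ */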
+void __init early_ioremap_reset(void)
{
- unsigned long offset, last_addr;
- unsigned int nrpages;
enum fixed_addresses idx;
+ unsigned long *pte, phys, addr;
+
+ after_paging_init = 1;
+ for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
+ addr = fix_to_virt(idx);
+ pte = early_ioremap_pte(addr);
+ if (*pte & _PAGE_PRESENT) {
+ phys = *pte & PAGE_MASK;
+ set_fixmap(idx, phys);
+ }
+ }
+}
+
+static void __init __early_set_fixmap(enum fixed_addresses idx,
+ unsigned long phys, pgprot_t flags)
+{
+ unsigned long *pte, addr = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+ pte = early_ioremap_pte(addr);
+ if (pgprot_val(flags))
+ *pte = (phys & PAGE_MASK) | pgprot_val(flags);
+ else
+ *pte = 0;
+ __flush_tlb_one(addr);
+}
+
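+/*
+ * Until paging_init() has run, fixmap ptes live in bm_pte and must
+ * be written by hand; afterwards the regular set_fixmap() and
+ * clear_fixmap() interfaces take over.
+ */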
+static inline void __init early_set_fixmap(enum fixed_addresses idx,
+ unsigned long phys)
+{
+ if (after_paging_init)
+ set_fixmap(idx, phys);
+ else
+ __early_set_fixmap(idx, phys, PAGE_KERNEL);
+}
+
+static inline void __init early_clear_fixmap(enum fixed_addresses idx)
+{
+ if (after_paging_init)
+ clear_fixmap(idx);
+ else
+ __early_set_fixmap(idx, 0, __pgprot(0));
+}
+
+
+int __initdata early_ioremap_nested;
+
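+/*
+ * early_ioremap_nested counts the early mappings currently live; if
+ * it is still nonzero by late_initcall time, some early_ioremap()
+ * was never paired with an early_iounmap().
+ */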
+static int __init check_early_ioremap_leak(void)
+{
+ if (!early_ioremap_nested)
+ return 0;
+
+ printk(KERN_WARNING
+ "Debug warning: early ioremap leak of %d areas detected.\n",
+ early_ioremap_nested);
+ printk(KERN_WARNING
+ "please boot with early_ioremap_debug and report the dmesg.\n");
+ WARN_ON(1);
+
+ return 1;
+}
+late_initcall(check_early_ioremap_leak);
+
+void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+{
+ unsigned long offset, last_addr;
+ unsigned int nrpages, nesting;
+ enum fixed_addresses idx0, idx;
+
+ WARN_ON(system_state != SYSTEM_BOOTING);
+
+ nesting = early_ioremap_nested;
+ if (early_ioremap_debug) {
+ printk("early_ioremap(%08lx, %08lx) [%d] => ",
+ phys_addr, size, nesting);
+ dump_stack();
+ }
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
+ if (!size || last_addr < phys_addr) {
+ WARN_ON(1);
return NULL;
+ }
- /*
- * Don't remap the low PCI/ISA area, it's always mapped..
- */
- if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
- return phys_to_virt(phys_addr);
-
+ if (nesting >= FIX_BTMAPS_NESTING) {
+ WARN_ON(1);
+ return NULL;
+ }
+ early_ioremap_nested++;
/*
 * Mappings have to be page-aligned
 */
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr + 1) - phys_addr;

/*
 * Mappings have to fit in the FIX_BTMAP area.
 */
nrpages = size >> PAGE_SHIFT;
- if (nrpages > NR_FIX_BTMAPS)
+ if (nrpages > NR_FIX_BTMAPS) {
+ WARN_ON(1);
return NULL;
+ }
/*
* Ok, go for it..
*/
- idx = FIX_BTMAP_BEGIN;
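+ /*
+ * Each nesting level gets its own window of NR_FIX_BTMAPS fixmap
+ * slots. Fixmap indices grow towards lower virtual addresses, so
+ * --idx below maps successive physical pages at successive virtual
+ * addresses.
+ */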
+ idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+ idx = idx0;
while (nrpages > 0) {
- set_fixmap(idx, phys_addr);
+ early_set_fixmap(idx, phys_addr);
phys_addr += PAGE_SIZE;
--idx;
--nrpages;
}
- return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
+ if (early_ioremap_debug)
+ printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+
+ return (void*) (offset + fix_to_virt(idx0));
}
-void __init bt_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void *addr, unsigned long size)
{
unsigned long virt_addr;
unsigned long offset;
unsigned int nrpages;
enum fixed_addresses idx;
+ unsigned int nesting;
+
+ nesting = --early_ioremap_nested;
+ WARN_ON(early_ioremap_nested < 0);
+
+ if (early_ioremap_debug) {
+ printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
+ dump_stack();
+ }
virt_addr = (unsigned long)addr;
- if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
+ WARN_ON(1);
return;
+ }
offset = virt_addr & ~PAGE_MASK;
nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
- idx = FIX_BTMAP_BEGIN;
+ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
while (nrpages > 0) {
- clear_fixmap(idx);
+ early_clear_fixmap(idx);
--idx;
--nrpages;
}
}
+
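+/*
+ * fix_to_virt() references this function when it cannot prove at
+ * compile time that an index is a valid fixmap slot; defining it
+ * here turns a bad index into a runtime warning rather than a
+ * link-time error.
+ */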
+void __this_fixmap_does_not_exist(void)
+{
+ WARN_ON(1);
+}