}
 EXPORT_SYMBOL(iounmap);
 
+static __initdata int after_paging_init;
+static __initdata unsigned long bm_pte[1024]
+                               __attribute__((aligned(PAGE_SIZE)));
+
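+/*
+ * bm_pte is a single, page-aligned page table that backs the boot-time
+ * fixmap slots; after_paging_init becomes non-zero once the kernel page
+ * tables are live and the regular fixmap helpers can take over.
+ *
+ * The two helpers below return the swapper_pg_dir entry and the bm_pte
+ * entry covering a boot-time fixmap address.
+ */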
+static inline unsigned long * __init bt_ioremap_pgd(unsigned long addr)
+{
+       return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
+}
+
+static inline unsigned long * __init bt_ioremap_pte(unsigned long addr)
+{
+       return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
+}
+
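+/*
+ * Hook bm_pte into swapper_pg_dir so that boot-time fixmap slots can be
+ * mapped before the real kernel page tables exist.  All BTMAP slots must
+ * fall into this single page table, hence the BUG_ON().
+ */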
+void __init bt_ioremap_init(void)
+{
+       unsigned long *pgd;
+
+       pgd = bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+       memset(bm_pte, 0, sizeof(bm_pte));
+       *pgd = __pa(bm_pte) | _PAGE_TABLE;
+       BUG_ON(pgd != bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
+}
+
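+/* Unhook the boot-time page table from swapper_pg_dir again. */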
+void __init bt_ioremap_clear(void)
+{
+       unsigned long *pgd;
+
+       pgd = bt_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+       *pgd = 0;
+       __flush_tlb_all();
+}
+
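+/*
+ * Re-establish any still-present boot-time mappings through the regular
+ * fixmap code so they survive the switch to the real page tables, and
+ * route all further requests through set_fixmap()/clear_fixmap().
+ */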
+void __init bt_ioremap_reset(void)
+{
+       enum fixed_addresses idx;
+       unsigned long *pte, phys, addr;
+
+       after_paging_init = 1;
+       for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
+               addr = fix_to_virt(idx);
+               pte = bt_ioremap_pte(addr);
+               if (*pte & _PAGE_PRESENT) {
+                       phys = *pte & PAGE_MASK;
+                       set_fixmap(idx, phys);
+               }
+       }
+}
+
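+/* Install (or clear, if no flags are given) a single pte in bm_pte. */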
+static void __init __bt_set_fixmap(enum fixed_addresses idx,
+                                  unsigned long phys, pgprot_t flags)
+{
+       unsigned long *pte, addr = __fix_to_virt(idx);
+
+       if (idx >= __end_of_fixed_addresses) {
+               BUG();
+               return;
+       }
+       pte = bt_ioremap_pte(addr);
+       if (pgprot_val(flags))
+               *pte = (phys & PAGE_MASK) | pgprot_val(flags);
+       else
+               *pte = 0;
+       __flush_tlb_one(addr);
+}
+
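+/*
+ * Before the hand-over a mapping has to go through bm_pte; afterwards
+ * the regular fixmap helpers do the work.
+ */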
+static inline void __init bt_set_fixmap(enum fixed_addresses idx,
+                                       unsigned long phys)
+{
+       if (after_paging_init)
+               set_fixmap(idx, phys);
+       else
+               __bt_set_fixmap(idx, phys, PAGE_KERNEL);
+}
+
+static inline void __init bt_clear_fixmap(enum fixed_addresses idx)
+{
+       if (after_paging_init)
+               clear_fixmap(idx);
+       else
+               __bt_set_fixmap(idx, 0, __pgprot(0));
+}
+
 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
 {
        unsigned long offset, last_addr;
         */
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
-               set_fixmap(idx, phys_addr);
+               bt_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
 
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
-               clear_fixmap(idx);
+               bt_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
 
  * mappings, before the real ioremap() is functional.
  * A boot-time mapping is currently limited to at most 16 pages.
  */
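+/*
+ * bt_ioremap_init() installs the boot-time page table, bt_ioremap_reset()
+ * hands still-present boot-time mappings over to the regular fixmap code
+ * once paging is up, and bt_ioremap_clear() removes the boot-time page
+ * table again.
+ */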
+extern void bt_ioremap_init(void);
+extern void bt_ioremap_clear(void);
+extern void bt_ioremap_reset(void);
 extern void *bt_ioremap(unsigned long offset, unsigned long size);
 extern void bt_iounmap(void *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
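+/*
+ * Minimal usage sketch (parse_boot_table and struct boot_table are made-up
+ * names, for illustration only): map a table handed over by the firmware
+ * while the normal ioremap() is not yet available, read it, then drop the
+ * mapping again.
+ *
+ *	void __init parse_boot_table(unsigned long phys)
+ *	{
+ *		struct boot_table *tbl = bt_ioremap(phys, sizeof(*tbl));
+ *
+ *		if (!tbl)
+ *			return;
+ *		... consume *tbl ...
+ *		bt_iounmap(tbl, sizeof(*tbl));
+ *	}
+ */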