 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
 
-/* called with the page_table_lock held */
 static inline void 
 remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, 
               unsigned long phys_addr, unsigned long flags)
        } while (address && (address < end));
 }
 
-/* called with the page_table_lock held */
 static inline int 
 remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, 
               unsigned long phys_addr, unsigned long flags)
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, 
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        return error;
 }
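
This hunk is the template for every ioremap-style conversion below: the caller-side locking of init_mm.page_table_lock goes away, and pte_alloc_kernel() loses its mm argument. A minimal sketch of the post-patch calling convention (illustrative only, not part of the patch; the function name is invented, and remap_area_pte() plus the PMD stepping are assumed from the surrounding code):

	/*
	 * Sketch: post-patch kernel-mapping walk.  pte_alloc_kernel() takes
	 * init_mm.page_table_lock internally, so the walker holds no lock.
	 */
	static int sketch_remap_pmd_range(pmd_t *pmd, unsigned long address,
					  unsigned long end, unsigned long phys_addr,
					  unsigned long flags)
	{
		do {
			pte_t *pte = pte_alloc_kernel(pmd, address);	/* no mm */
			if (!pte)
				return -ENOMEM;
			remap_area_pte(pte, address, end - address,
				       address + phys_addr, flags);
			address = (address + PMD_SIZE) & PMD_MASK;
			pmd++;
		} while (address && (address < end));
		return 0;
	}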
 
 
        pte_t *pte;
        int ret = 0;
 
-       spin_lock(&init_mm.page_table_lock);
-
        do {
                pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
                pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
                }
                WARN_ON(!pmd_none(*pmd));
 
-               pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+               pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                consistent_pte = pte;
        } while (0);
 
-       spin_unlock(&init_mm.page_table_lock);
-
        return ret;
 }
 
 
 
        pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        BUG_ON(address >= end);
-       spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                dir++;
        } while (address && (address < end));
 
-       spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap(start, end);
        return err;
 }
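
The ARM get_pgd_slow() hunk below is the one conversion that goes the other way: the new pgd's vector-page pte table belongs to the user mm rather than init_mm, so pte_alloc_kernel(mm, ...) becomes pte_alloc_map(), and the mapped ptes on both sides of the vector-entry copy must be released with pte_unmap_nested()/pte_unmap() afterwards.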
 
        if (!new_pmd)
                goto no_pmd;
 
-       new_pte = pte_alloc_kernel(mm, new_pmd, 0);
+       new_pte = pte_alloc_map(mm, new_pmd, 0);
        if (!new_pte)
                goto no_pte;
 
        init_pte = pte_offset_map_nested(init_pmd, 0);
 
        set_pte(new_pte, *init_pte);
        pte_unmap_nested(init_pte);
+       pte_unmap(new_pte);
 
        /*
         * the page table entries are zeroed
 
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, prot);
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pud_t *pud;
                pmd_t *pmd;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
 }
 
        pte_t *pte;
        int err = -ENOMEM;
 
-       spin_lock(&init_mm.page_table_lock);
-
        /* Use upper 10 bits of VA to index the first level map */
        pge = pgd_offset_k(va);
        pue = pud_offset(pge, va);
        pme = pmd_offset(pue, va);
 
        /* Use middle 10 bits of VA to index the second-level map */
-       pte = pte_alloc_kernel(&init_mm, pme, va);
+       pte = pte_alloc_kernel(pme, va);
        if (pte != 0) {
                err = 0;
                set_pte(pte, mk_pte_phys(pa & PAGE_MASK, prot));
        }
 
-       spin_unlock(&init_mm.page_table_lock);
        return err;
 }
 
 
        unsigned long pfn;
 
        pfn = phys_addr >> PAGE_SHIFT;
-       pte = pte_alloc_kernel(&init_mm, pmd, addr);
+       pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
        flush_cache_all();
        phys_addr -= addr;
        pgd = pgd_offset_k(addr);
-       spin_lock(&init_mm.page_table_lock);
        do {
                next = pgd_addr_end(addr, end);
                err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return err;
 }
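
The newer pgd_addr_end()-style walkers need the same treatment and nothing more: ioremap_pud_range() bottoms out in pte_alloc_kernel(), which now serializes on init_mm.page_table_lock itself, so the top-level loop above runs unlocked.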
 
 
        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */
 
-       spin_lock(&init_mm.page_table_lock);
        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
-
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
-               pte = pte_alloc_map(&init_mm, pmd, address);
+               pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
-               if (!pte_none(*pte)) {
-                       pte_unmap(pte);
+               if (!pte_none(*pte))
                        goto out;
-               }
                set_pte(pte, mk_pte(page, pgprot));
-               pte_unmap(pte);
        }
-  out: spin_unlock(&init_mm.page_table_lock);
+  out:
        /* no need for flush_tlb */
        return page;
 }
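
put_kernel_page() shows the companion simplification for init_mm users of the mapped-pte API: pte_alloc_map() hands back a pte that must be released with pte_unmap(), whereas pte_alloc_kernel() returns the pte_offset_kernel() pointer, which needs no unmap; switching over removes both pte_unmap() calls along with the caller-side lock.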
 
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
 }
 
                        virtaddr += PTRTREESIZE;
                        size -= PTRTREESIZE;
                } else {
-                       pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
+                       pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
                        if (!pte_dir) {
                                printk("ioremap: no mem for pte_dir\n");
                                return NULL;
 
                        pte_t *pte;
                        unsigned long end3;
 
-                       if((pte = pte_alloc_kernel(&init_mm, pmd, vaddr)) == NULL) {
+                       if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }
 
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pud_t *pud;
                pmd_t *pmd;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
 }
 
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr);
+               pte_t * pte = pte_alloc_kernel(pmd, vaddr);
                if (!pte)
                        return -ENOMEM;
                if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
 
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(NULL, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
-               pmd = pmd_alloc(dir, address);
+               pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
 }
 
        pte_t *pte;
        int ret = 0;
 
-       spin_lock(&init_mm.page_table_lock);
-
        do {
                pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
                pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
                }
                WARN_ON(!pmd_none(*pmd));
 
-               pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
+               pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                consistent_pte = pte;
        } while (0);
 
-       spin_unlock(&init_mm.page_table_lock);
-
        return ret;
 }
 
 
                pmd_t *pmdp;
                unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-               spin_lock(&init_mm.page_table_lock);
                pmdp = pmd_offset(pgd_offset_k(v), v);
                pmd_val(*pmdp++) = val;
                pmd_val(*pmdp++) = val;
                pmd_val(*pmdp++) = val;
                pmd_val(*pmdp++) = val;
-               spin_unlock(&init_mm.page_table_lock);
 
                v += LARGE_PAGE_SIZE_16M;
                p += LARGE_PAGE_SIZE_16M;
                pmd_t *pmdp;
                unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
 
-               spin_lock(&init_mm.page_table_lock);
                pmdp = pmd_offset(pgd_offset_k(v), v);
                pmd_val(*pmdp) = val;
-               spin_unlock(&init_mm.page_table_lock);
 
                v += LARGE_PAGE_SIZE_4M;
                p += LARGE_PAGE_SIZE_4M;
 
        pte_t *pg;
        int err = -ENOMEM;
 
-       spin_lock(&init_mm.page_table_lock);
        /* Use upper 10 bits of VA to index the first level map */
        pd = pmd_offset(pgd_offset_k(va), va);
        /* Use middle 10 bits of VA to index the second-level map */
-       pg = pte_alloc_kernel(&init_mm, pd, va);
+       pg = pte_alloc_kernel(pd, va);
        if (pg != 0) {
                err = 0;
                set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags)));
                if (mem_init_done)
                        flush_HPTE(0, va, pmd_val(*pd));
        }
-       spin_unlock(&init_mm.page_table_lock);
        return err;
 }
 
 
        for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
-
-                       /* XXX: do we need the lock? */
-                       spin_lock(&init_mm.page_table_lock);
                        unmap_vm_area(tmp);
-                       spin_unlock(&init_mm.page_table_lock);
-
                        kfree(tmp);
                        up(&imlist_sem);
                        return;
 
        unsigned long vsid;
 
        if (mem_init_done) {
-               spin_lock(&init_mm.page_table_lock);
                pgdp = pgd_offset_k(ea);
                pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
-               ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
+               ptep = pte_alloc_kernel(pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                          __pgprot(flags)));
-               spin_unlock(&init_mm.page_table_lock);
        } else {
                unsigned long va, vpn, hash, hpteg;
 
 
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return 0;
 }
 
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
 }
 
                BUG();
 
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return 0;
 }
 
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
                pud_t *pud;
                pud = pud_alloc(&init_mm, pgd, address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                pgd++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern pud_t *FASTCALL(__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
 extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
+extern pte_t *FASTCALL(pte_alloc_kernel(pmd_t *pmd, unsigned long address));
 extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
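
Since the prototype in include/linux/mm.h changes, every caller of pte_alloc_kernel() has to be converted in the same patch, which is why so many architectures appear above.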
 
        return pte_offset_map(pmd, address);
 }
 
-pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
 {
        if (!pmd_present(*pmd)) {
                pte_t *new;
 
-               spin_unlock(&mm->page_table_lock);
-               new = pte_alloc_one_kernel(mm, address);
-               spin_lock(&mm->page_table_lock);
+               new = pte_alloc_one_kernel(&init_mm, address);
                if (!new)
                        return NULL;
 
-               /*
-                * Because we dropped the lock, we should re-check the
-                * entry, as somebody else could have populated it..
-                */
-               if (pmd_present(*pmd)) {
+               spin_lock(&init_mm.page_table_lock);
+               if (pmd_present(*pmd))
                        pte_free_kernel(new);
-                       goto out;
-               }
-               pmd_populate_kernel(mm, pmd, new);
+               else
+                       pmd_populate_kernel(&init_mm, pmd, new);
+               spin_unlock(&init_mm.page_table_lock);
        }
-out:
        return pte_offset_kernel(pmd, address);
 }
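
pte_alloc_kernel() now allocates outside the lock and takes init_mm.page_table_lock only for the pmd_present() recheck and the populate. A hypothetical caller (sketch only, modelled on put_kernel_page() above; the name is invented) reads:

	/*
	 * Sketch: map one kernel page at addr with no caller-side locking.
	 * pud_alloc()/pmd_alloc() cover init_mm via the bridging hack below,
	 * and pte_alloc_kernel() locks internally.
	 */
	static int sketch_map_kernel_page(unsigned long addr, struct page *page,
					  pgprot_t prot)
	{
		pgd_t *pgd = pgd_offset_k(addr);
		pud_t *pud = pud_alloc(&init_mm, pgd, addr);
		pmd_t *pmd;
		pte_t *pte;

		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			return -ENOMEM;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			return -ENOMEM;
		if (pte_none(*pte))
			set_pte(pte, mk_pte(page, prot));
		return 0;
	}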
 
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
+ * We've already handled the fast-path in-line.
  */
 pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
        pud_t *new;
 
-       spin_unlock(&mm->page_table_lock);
+       if (mm != &init_mm)             /* Temporary bridging hack */
+               spin_unlock(&mm->page_table_lock);
        new = pud_alloc_one(mm, address);
-       spin_lock(&mm->page_table_lock);
-       if (!new)
+       if (!new) {
+               if (mm != &init_mm)     /* Temporary bridging hack */
+                       spin_lock(&mm->page_table_lock);
                return NULL;
+       }
 
-       /*
-        * Because we dropped the lock, we should re-check the
-        * entry, as somebody else could have populated it..
-        */
+       spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd)) {
                pud_free(new);
                goto out;
        }
        pgd_populate(mm, pgd, new);
  out:
+       if (mm == &init_mm)             /* Temporary bridging hack */
+               spin_unlock(&mm->page_table_lock);
        return pud_offset(pgd, address);
 }
 #endif /* __PAGETABLE_PUD_FOLDED */
 #ifndef __PAGETABLE_PMD_FOLDED
 /*
  * Allocate page middle directory.
- *
- * We've already handled the fast-path in-line, and we own the
- * page table lock.
+ * We've already handled the fast-path in-line.
  */
 pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
        pmd_t *new;
 
-       spin_unlock(&mm->page_table_lock);
+       if (mm != &init_mm)             /* Temporary bridging hack */
+               spin_unlock(&mm->page_table_lock);
        new = pmd_alloc_one(mm, address);
-       spin_lock(&mm->page_table_lock);
-       if (!new)
+       if (!new) {
+               if (mm != &init_mm)     /* Temporary bridging hack */
+                       spin_lock(&mm->page_table_lock);
                return NULL;
+       }
 
-       /*
-        * Because we dropped the lock, we should re-check the
-        * entry, as somebody else could have populated it..
-        */
+       spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
        if (pud_present(*pud)) {
                pmd_free(new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
 
  out:
+       if (mm == &init_mm)             /* Temporary bridging hack */
+               spin_unlock(&mm->page_table_lock);
        return pmd_offset(pud, address);
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
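
The lock-state contract of the bridging hack is the same in __pud_alloc() and __pmd_alloc(): a normal mm enters and leaves with page_table_lock held, dropping it only around the allocation (hence the pgd_present()/pud_present() recheck after retaking it), while init_mm enters and leaves unlocked and takes the lock only around the populate. Once no caller holds the lock for kernel mappings, the mm == &init_mm conditionals can be deleted; that is what makes the hack temporary.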
 
 {
        pte_t *pte;
 
-       pte = pte_alloc_kernel(&init_mm, pmd, addr);
+       pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
 
        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
-       spin_lock(&init_mm.page_table_lock);
        do {
                next = pgd_addr_end(addr, end);
                err = vmap_pud_range(pgd, addr, next, prot, pages);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
-       spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
 }
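
As in the ioremap paths, nothing is lost by unlocking the vmap walk: kernel address-space allocation is serialized by vmlist_lock, and page-table population is serialized inside pte_alloc_kernel() (and, for init_mm, inside pud_alloc()/pmd_alloc() via the bridging hack).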