int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
 int mmu_vmalloc_psize = MMU_PAGE_4K;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
 int mmu_io_psize = MMU_PAGE_4K;
 int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
        }
 #endif /* CONFIG_PPC_64K_PAGES */
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+       /* We try to use 16M pages for vmemmap if that is supported
+        * and we have at least 1G of RAM at boot
+        */
+       if (mmu_psize_defs[MMU_PAGE_16M].shift &&
+           lmb_phys_mem_size() >= 0x40000000)
+               mmu_vmemmap_psize = MMU_PAGE_16M;
+       else if (mmu_psize_defs[MMU_PAGE_64K].shift)
+               mmu_vmemmap_psize = MMU_PAGE_64K;
+       else
+               mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
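A rough sizing check for the 1G threshold, assuming struct page is about 64 bytes on ppc64 (an assumption; the exact size depends on the config): 1G of RAM in 4K pages is 262144 pages, so the struct page array needs roughly 262144 * 64 = 16M, i.e. exactly one bolted 16M vmemmap page. Below 1G a 16M mapping would be mostly unused, hence the fallback to 64K or 4K pages.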
        printk(KERN_DEBUG "Page orders: linear mapping = %d, "
-              "virtual = %d, io = %d\n",
+              "virtual = %d, io = %d"
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+              ", vmemmap = %d"
+#endif
+              "\n",
               mmu_psize_defs[mmu_linear_psize].shift,
               mmu_psize_defs[mmu_virtual_psize].shift,
-              mmu_psize_defs[mmu_io_psize].shift);
+              mmu_psize_defs[mmu_io_psize].shift
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+              , mmu_psize_defs[mmu_vmemmap_psize].shift
+#endif
+              );
 
 #ifdef CONFIG_HUGETLB_PAGE
        /* Init large page size. Currently, we pick 16M or 1M depending
 
  *
  */
 
+#undef DEBUG
+
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 }
 
 int __meminit vmemmap_populate(struct page *start_page,
-                                       unsigned long nr_pages, int node)
+                              unsigned long nr_pages, int node)
 {
        unsigned long mode_rw;
        unsigned long start = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + nr_pages);
-       unsigned long page_size = 1 << mmu_psize_defs[mmu_linear_psize].shift;
+       unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
        mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
 
                        start, p, __pa(p));
 
                mapped = htab_bolt_mapping(start, start + page_size,
-                                       __pa(p), mode_rw, mmu_linear_psize,
+                                       __pa(p), mode_rw, mmu_vmemmap_psize,
                                        mmu_kernel_ssize);
                BUG_ON(mapped < 0);
        }
 
        return 0;
 }
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
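The hunk above elides the body of the mapping loop; reconstructed as a sketch from the visible fragments (the DBG call and the htab_bolt_mapping call appear in the hunk, the loop framing is assumed), it behaves roughly like:

	/* Walk the vmemmap range in vmemmap-page-size steps, backing
	 * each step with a freshly allocated block and a bolted hash
	 * table mapping at the new vmemmap page size. */
	start = _ALIGN_DOWN(start, page_size);
	for (; start < end; start += page_size) {
		int mapped;
		void *p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		DBG("vmemmap %08lx allocated at %p, physical %08lx.\n",
		    start, p, __pa(p));

		mapped = htab_bolt_mapping(start, start + page_size,
					   __pa(p), mode_rw, mmu_vmemmap_psize,
					   mmu_kernel_ssize);
		BUG_ON(mapped < 0);
	}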
 
 #include <asm/udbg.h>
 
 #ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
 #else
 #define DBG pr_debug
 #endif
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
        extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+       extern unsigned int *slb_miss_kernel_load_vmemmap;
+       unsigned long vmemmap_llp;
+#endif
 
        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+       vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
 
                DBG("SLB: linear  LLP = %04lx\n", linear_llp);
                DBG("SLB: io      LLP = %04lx\n", io_llp);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+               patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+                                  SLB_VSID_KERNEL | vmemmap_llp);
+               DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
        }
 
        get_paca()->stab_rr = SLB_NUM_BOLTED;
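For reference, patch_slb_encoding (already present in slb.c; a sketch of the existing helper, not part of this patch) rewrites the 16-bit immediate of the targeted "li" instruction, which is how slb_miss_kernel_load_vmemmap ends up loading SLB_VSID_KERNEL | vmemmap_llp at runtime:

	static inline void patch_slb_encoding(unsigned int *insn_addr,
					      unsigned int immed)
	{
		/* Replace the low 16 bits (the li immediate) with the
		 * VSID flags | LLP encoding, then flush the icache so
		 * the SLB miss handler executes the patched instruction. */
		*insn_addr = (*insn_addr & 0xffff0000) | immed;
		flush_icache_range((unsigned long)insn_addr,
				   (unsigned long)insn_addr + 4);
	}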
 
         * it to VSID 0, which is reserved as a bad VSID - one which
         * will never have any pages in it.  */
 
-       /* Check if hitting the linear mapping of the vmalloc/ioremap
-        * kernel space
+       /* Check if hitting the linear mapping or some other kernel space
        */
        bne     cr7,1f
 
 END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
        b       slb_finish_load_1T
 
-1:     /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
+1:
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+       /* Check virtual memmap region. To be patched at kernel boot */
+       cmpldi  cr0,r9,0xf
+       bne     1f
+_GLOBAL(slb_miss_kernel_load_vmemmap)
+       li      r11,0
+       b       6f
+1:
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+       /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
         * will be patched by the kernel at boot
         */
 BEGIN_FTR_SECTION
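Earlier in this handler r9 already holds the region ID of the faulting effective address (srdi r9,r3,60 in the existing code, i.e. the top four bits), so the cmpldi against 0xf matches exactly the new VMEMMAP_REGION_ID. In C terms, the added block amounts to something like:

	/* Illustrative C equivalent of the vmemmap check above; the 0
	 * in "li r11,0" is patched at boot with
	 * SLB_VSID_KERNEL | vmemmap_llp. */
	if ((ea >> 60) == VMEMMAP_REGION_ID)
		vsid_flags = SLB_VSID_KERNEL | vmemmap_llp;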
 
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
 extern int mmu_vmalloc_psize;
+extern int mmu_vmemmap_psize;
 extern int mmu_io_psize;
 extern int mmu_kernel_ssize;
 extern int mmu_highuser_ssize;
 
 
 #define VMALLOC_REGION_ID      (REGION_ID(VMALLOC_START))
 #define KERNEL_REGION_ID       (REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID      (0xfUL)
 #define USER_REGION_ID         (0UL)
 
 /*
- * Defines the address of the vmemap area, in the top 16th of the
- * kernel region.
+ * Defines the address of the vmemmap area, in its own region.
  */
-#define VMEMMAP_BASE (ASM_CONST(CONFIG_KERNEL_START) + \
-                                       (0xfUL << (REGION_SHIFT - 4)))
-#define vmemmap ((struct page *)VMEMMAP_BASE)
+#define VMEMMAP_BASE           (VMEMMAP_REGION_ID << REGION_SHIFT)
+#define vmemmap                        ((struct page *)VMEMMAP_BASE)
+
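With the 4-bit region ID occupying the top of the 64-bit effective address (REGION_SHIFT is 60 on ppc64), VMEMMAP_BASE works out to 0xf << 60 = 0xf000000000000000. Giving vmemmap its own region, instead of carving it out of the top 16th of the kernel region, is what lets the SLB miss handler assign it a page size independent of mmu_linear_psize.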
 
 /*
  * Common bits in a linux-style PTE.  These match the bits in the