[PATCH] ppc64: move iSeries vio iommu init
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index 4b42aff74d7312e7f2cdad4aa58dcdaa13a04e1b..9edfe267123e8d40669b6becb81ece1574da6d55 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
 #include <asm/vdso.h>
 #include <asm/imalloc.h>
 
+#if PGTABLE_RANGE > USER_VSID_RANGE
+#warning Limited user VSID range means pagetable space is wasted
+#endif
+
+#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
+#warning TASK_SIZE is smaller than it needs to be.
+#endif
+
 int mem_init_done;
 unsigned long ioremap_bot = IMALLOC_BASE;
 static unsigned long phbs_io_bot = PHBS_IO_BASE;
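The two #warning blocks added above are compile-time sanity checks: pagetable space is wasted whenever the linear pagetable range outruns what user VSIDs can actually map. As a standalone illustration of the pattern (the MY_* constants are hypothetical, not the kernel's):

/* Illustrative only: hypothetical MY_* constants, same check structure */
#define MY_PGTABLE_RANGE	(1UL << 44)	/* space the pagetables can describe */
#define MY_USER_VSID_RANGE	(1UL << 41)	/* space user VSIDs can actually map */

#if MY_PGTABLE_RANGE > MY_USER_VSID_RANGE
#warning pagetables describe more address space than user VSIDs can map
#endif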
@@ -73,9 +81,6 @@ static unsigned long phbs_io_bot = PHBS_IO_BASE;
 extern pgd_t swapper_pg_dir[];
 extern struct task_struct *current_set[NR_CPUS];
 
-extern pgd_t ioremap_dir[];
-pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;
-
 unsigned long klimit = (unsigned long)_end;
 
 unsigned long _SDR1=0;
@@ -101,7 +106,7 @@ void show_mem(void)
        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                       page = pgdat->node_mem_map + i;
+                       page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
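The switch to pgdat_page_nr() stops show_mem() from assuming a node's struct page array is one contiguous block. In the flat-memory case the accessor presumably reduces to the old pointer arithmetic; a sketch of that case (an assumption, not the exact kernel macro):

/* Sketch of the flat mem_map case; my_pgdat_page_nr is illustrative only */
static inline struct page *my_pgdat_page_nr(pg_data_t *pgdat, int nr)
{
	return pgdat->node_mem_map + nr;	/* only valid while the map is contiguous */
}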
@@ -137,69 +142,6 @@ void iounmap(volatile void __iomem *addr)
 
 #else
 
-static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
-                                 unsigned long end)
-{
-       pte_t *pte;
-
-       pte = pte_offset_kernel(pmd, addr);
-       do {
-               pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
-               WARN_ON(!pte_none(ptent) && !pte_present(ptent));
-       } while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
-                                    unsigned long end)
-{
-       pmd_t *pmd;
-       unsigned long next;
-
-       pmd = pmd_offset(pud, addr);
-       do {
-               next = pmd_addr_end(addr, end);
-               if (pmd_none_or_clear_bad(pmd))
-                       continue;
-               unmap_im_area_pte(pmd, addr, next);
-       } while (pmd++, addr = next, addr != end);
-}
-
-static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
-                                    unsigned long end)
-{
-       pud_t *pud;
-       unsigned long next;
-
-       pud = pud_offset(pgd, addr);
-       do {
-               next = pud_addr_end(addr, end);
-               if (pud_none_or_clear_bad(pud))
-                       continue;
-               unmap_im_area_pmd(pud, addr, next);
-       } while (pud++, addr = next, addr != end);
-}
-
-static void unmap_im_area(unsigned long addr, unsigned long end)
-{
-       struct mm_struct *mm = &ioremap_mm;
-       unsigned long start = addr, next;
-       pgd_t *pgd;
-
-       spin_lock(&mm->page_table_lock);
-
-       pgd = pgd_offset_i(addr);
-       flush_cache_vunmap(addr, end);
-       do {
-               next = pgd_addr_end(addr, end);
-               if (pgd_none_or_clear_bad(pgd))
-                       continue;
-               unmap_im_area_pud(pgd, addr, next);
-       } while (pgd++, addr = next, addr != end);
-       flush_tlb_kernel_range(start, end);
-
-       spin_unlock(&mm->page_table_lock);
-}
-
 /*
  * map_io_page currently only called by __ioremap
  * map_io_page adds an entry to the ioremap page table
@@ -214,21 +156,21 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
        unsigned long vsid;
 
        if (mem_init_done) {
-               spin_lock(&ioremap_mm.page_table_lock);
-               pgdp = pgd_offset_i(ea);
-               pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+               spin_lock(&init_mm.page_table_lock);
+               pgdp = pgd_offset_k(ea);
+               pudp = pud_alloc(&init_mm, pgdp, ea);
                if (!pudp)
                        return -ENOMEM;
-               pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+               pmdp = pmd_alloc(&init_mm, pudp, ea);
                if (!pmdp)
                        return -ENOMEM;
-               ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
+               ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
                if (!ptep)
                        return -ENOMEM;
                pa = abs_to_phys(pa);
-               set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+               set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
                                                          __pgprot(flags)));
-               spin_unlock(&ioremap_mm.page_table_lock);
+               spin_unlock(&init_mm.page_table_lock);
        } else {
                unsigned long va, vpn, hash, hpteg;
 
@@ -246,9 +188,10 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
                hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
                /* Panic if a pte group is full */
-               if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
-                                      _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX,
-                                      1, 0) == -1) {
+               if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
+                                      HPTE_V_BOLTED,
+                                      _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
+                   == -1) {
                        panic("map_io_page: could not insert mapping");
                }
        }
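As context for this hunk: map_io_page() is the back end of __ioremap(). Once mem_init_done is set it builds ordinary page tables in init_mm (no longer the private ioremap_mm); before that it bolts a hash PTE directly, now passing HPTE_V_BOLTED instead of the old bolted/secondary flag arguments. A hedged sketch of a typical consumer, using only the standard ioremap interfaces (my_probe and the MY_* constants are hypothetical):

#include <asm/io.h>

#define MY_MMIO_BASE	0xf0000000UL	/* hypothetical device registers */
#define MY_MMIO_SIZE	0x1000UL

static int my_probe(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(MY_MMIO_BASE, MY_MMIO_SIZE);	/* ends up in map_io_page() */
	if (!regs)
		return -ENOMEM;

	status = readl(regs);	/* MMIO read through the new mapping */
	iounmap(regs);		/* releases the imalloc region again */
	return status ? 0 : -ENODEV;
}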
@@ -267,13 +210,9 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 
        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_io_page(ea+i, pa+i, flags))
-                       goto failure;
+                       return NULL;
 
        return (void __iomem *) (ea + (addr & ~PAGE_MASK));
- failure:
-       if (mem_init_done)
-               unmap_im_area(ea, ea + size);
-       return NULL;
 }
 
 
@@ -295,7 +234,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
         * Before that, we map using addresses going
         * up from ioremap_bot.  imalloc will use
         * the addresses from ioremap_bot through
-        * IMALLOC_END (0xE000001fffffffff)
+        * IMALLOC_END
         * 
         */
        pa = addr & PAGE_MASK;
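Since mappings are built a page at a time, __ioremap() rounds the physical address down to a page boundary here and re-applies the in-page offset to the token it returns (the ea + (addr & ~PAGE_MASK) in __ioremap_com above). A worked example of that arithmetic, assuming 4 KB pages:

/* Illustrative helpers; values in comments assume 4 KB pages */
static inline unsigned long io_page_base(unsigned long addr)
{
	return addr & PAGE_MASK;	/* 0xf0001234 -> 0xf0001000 */
}

static inline unsigned long io_page_offset(unsigned long addr)
{
	return addr & ~PAGE_MASK;	/* 0xf0001234 -> 0x234, re-added to the token */
}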
@@ -381,19 +320,14 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  */
 void iounmap(volatile void __iomem *token)
 {
-       unsigned long address, size;
        void *addr;
 
        if (!mem_init_done)
                return;
        
        addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-       
-       if ((size = im_free(addr)) == 0)
-               return;
 
-       address = (unsigned long)addr; 
-       unmap_im_area(address, address + size);
+       im_free(addr);
 }
 
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
@@ -491,12 +425,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        int index;
        int err;
 
-#ifdef CONFIG_HUGETLB_PAGE
-       /* We leave htlb_segs as it was, but for a fork, we need to
-        * clear the huge_pgdir. */
-       mm->context.huge_pgdir = NULL;
-#endif
-
 again:
        if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
                return -ENOMEM;
@@ -527,8 +455,6 @@ void destroy_context(struct mm_struct *mm)
        spin_unlock(&mmu_context_lock);
 
        mm->context.id = NO_CONTEXT;
-
-       hugetlb_mm_free_pgd(mm);
 }
 
 /*
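init_new_context() above allocates context IDs with the 2.6-era two-step idr API: reserve node memory with idr_pre_get(), then retry the allocation while it reports -EAGAIN. A generic sketch of that retry pattern (my_idr and my_alloc_id are placeholders, and idr_init(&my_idr) must have run first):

#include <linux/idr.h>

static struct idr my_idr;	/* placeholder; analogous to mmu_context_idr */

static int my_alloc_id(void *object)
{
	int id, err;

again:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))	/* preallocate, may sleep */
		return -ENOMEM;

	err = idr_get_new(&my_idr, object, &id);
	if (err == -EAGAIN)	/* lost a race; preallocate and retry */
		goto again;

	return err ? err : id;	/* non-negative id on success */
}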
@@ -606,7 +532,7 @@ EXPORT_SYMBOL(page_is_ram);
  * Initialize the bootmem system and give it all the memory we
  * have available.
  */
-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
        unsigned long i;
@@ -628,12 +554,20 @@ void __init do_init_bootmem(void)
 
        max_pfn = max_low_pfn;
 
-       /* add all physical memory to the bootmem map. Also find the first */
+       /* Add all physical memory to the bootmem map, mark each area
+        * present.
+        */
        for (i=0; i < lmb.memory.cnt; i++) {
                unsigned long physbase, size;
+               unsigned long start_pfn, end_pfn;
 
                physbase = lmb.memory.region[i].physbase;
                size = lmb.memory.region[i].size;
+
+               start_pfn = physbase >> PAGE_SHIFT;
+               end_pfn = start_pfn + (size >> PAGE_SHIFT);
+               memory_present(0, start_pfn, end_pfn);
+
                free_bootmem(physbase, size);
        }
 
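The added lines register each LMB region with the sparsemem code by converting its base and size into a page-frame range. The conversion is plain shifts; a worked example with 4 KB pages (PAGE_SHIFT == 12) and an illustrative 64 MB region at 128 MB:

/* Worked example; the region is illustrative */
unsigned long physbase  = 0x08000000UL;			/* 128 MB */
unsigned long size      = 0x04000000UL;			/* 64 MB */
unsigned long start_pfn = physbase >> 12;		/* 0x8000 */
unsigned long end_pfn   = start_pfn + (size >> 12);	/* 0xc000, 16384 pages later */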
@@ -672,7 +606,7 @@ void __init paging_init(void)
        free_area_init_node(0, NODE_DATA(0), zones_size,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 }
-#endif /* CONFIG_DISCONTIGMEM */
+#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 
 static struct kcore_list kcore_vmem;
 
@@ -703,7 +637,7 @@ module_init(setup_kcore);
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
 #endif
        pg_data_t *pgdat;
@@ -714,7 +648,7 @@ void __init mem_init(void)
        num_physpages = max_low_pfn;    /* RAM is assumed contiguous */
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
-#ifdef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_NEED_MULTIPLE_NODES
         for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %x\n", nid);
@@ -729,7 +663,7 @@ void __init mem_init(void)
 
        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                       page = pgdat->node_mem_map + i;
+                       page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
@@ -752,9 +686,6 @@ void __init mem_init(void)
 
        mem_init_done = 1;
 
-#ifdef CONFIG_PPC_ISERIES
-       iommu_vio_init();
-#endif
        /* Initialize the vDSO */
        vdso_init();
 }
@@ -899,23 +830,43 @@ void __iomem * reserve_phb_iospace(unsigned long size)
        return virt_addr;
 }
 
-kmem_cache_t *zero_cache;
-
-static void zero_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
+static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
 {
-       memset(pte, 0, PAGE_SIZE);
+       memset(addr, 0, kmem_cache_size(cache));
 }
 
+static const int pgtable_cache_size[2] = {
+       PTE_TABLE_SIZE, PMD_TABLE_SIZE
+};
+static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
+       "pgd_pte_cache", "pud_pmd_cache",
+};
+
+kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
+
 void pgtable_cache_init(void)
 {
-       zero_cache = kmem_cache_create("zero",
-                               PAGE_SIZE,
-                               0,
-                               SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
-                               zero_ctor,
-                               NULL);
-       if (!zero_cache)
-               panic("pgtable_cache_init(): could not create zero_cache!\n");
+       int i;
+
+       BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
+       BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
+       BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
+       BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
+
+       for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
+               int size = pgtable_cache_size[i];
+               const char *name = pgtable_cache_name[i];
+
+               pgtable_cache[i] = kmem_cache_create(name,
+                                                    size, size,
+                                                    SLAB_HWCACHE_ALIGN
+                                                    | SLAB_MUST_HWCACHE_ALIGN,
+                                                    zero_ctor,
+                                                    NULL);
+               if (! pgtable_cache[i])
+                       panic("pgtable_cache_init(): could not create %s!\n",
+                             name);
+       }
 }
 
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
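The final hunk replaces the single page-sized zero_cache with an array of zeroed slab caches, one size shared by the PGD/PTE tables and one by the PUD/PMD tables (as the BUILD_BUG_ONs and the pgd_pte_cache/pud_pmd_cache names indicate), so each level consumes only as much memory as its table needs. Allocation sites then draw from the matching cache; a hedged sketch of what such a helper could look like (the real allocators live on the pgalloc.h side of the patch and may differ):

/* Sketch only; sketch_pte_alloc/sketch_pte_free are illustrative names */
static inline pte_t *sketch_pte_alloc(void)
{
	/* objects come back zeroed courtesy of zero_ctor */
	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM], GFP_KERNEL);
}

static inline void sketch_pte_free(pte_t *pte)
{
	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
}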