pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - arch/powerpc/mm/hugetlbpage.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6
[linux-2.6-omap-h63xx.git] / arch / powerpc / mm / hugetlbpage.c
index fb42c4dd32177327389dab8881d2b6cb8b681cb8..a117024ab8cdb68af98c8bfc13f822695dc4c2bf 100644 (file)
@@ -113,7 +113,7 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
 static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned int psize)
 {
-       pte_t *new = kmem_cache_alloc(huge_pgtable_cache(psize),
+       pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize),
                                      GFP_KERNEL|__GFP_REPEAT);
 
        if (! new)
@@ -128,29 +128,37 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
        return 0;
 }
 
-/* Base page size affects how we walk hugetlb page tables */
-#ifdef CONFIG_PPC_64K_PAGES
-#define hpmd_offset(pud, addr, h)      pmd_offset(pud, addr)
-#define hpmd_alloc(mm, pud, addr, h)   pmd_alloc(mm, pud, addr)
-#else
-static inline
-pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
+
+static pud_t *hpud_offset(pgd_t *pgd, unsigned long addr, struct hstate *hstate)
+{
+       if (huge_page_shift(hstate) < PUD_SHIFT)
+               return pud_offset(pgd, addr);
+       else
+               return (pud_t *) pgd;
+}
+static pud_t *hpud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long addr,
+                        struct hstate *hstate)
+{
+       if (huge_page_shift(hstate) < PUD_SHIFT)
+               return pud_alloc(mm, pgd, addr);
+       else
+               return (pud_t *) pgd;
+}
+static pmd_t *hpmd_offset(pud_t *pud, unsigned long addr, struct hstate *hstate)
 {
-       if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
+       if (huge_page_shift(hstate) < PMD_SHIFT)
                return pmd_offset(pud, addr);
        else
                return (pmd_t *) pud;
 }
-static inline
-pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
-                 struct hstate *hstate)
+static pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr,
+                        struct hstate *hstate)
 {
-       if (huge_page_shift(hstate) == PAGE_SHIFT_64K)
+       if (huge_page_shift(hstate) < PMD_SHIFT)
                return pmd_alloc(mm, pud, addr);
        else
                return (pmd_t *) pud;
 }
-#endif
 
 /* Build list of addresses of gigantic pages.  This function is used in early
  * boot before the buddy or bootmem allocator is setup.
@@ -204,7 +212,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 
        pg = pgd_offset(mm, addr);
        if (!pgd_none(*pg)) {
-               pu = pud_offset(pg, addr);
+               pu = hpud_offset(pg, addr, hstate);
                if (!pud_none(*pu)) {
                        pm = hpmd_offset(pu, addr, hstate);
                        if (!pmd_none(*pm))
@@ -233,7 +241,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
        addr &= hstate->mask;
 
        pg = pgd_offset(mm, addr);
-       pu = pud_alloc(mm, pg, addr);
+       pu = hpud_alloc(mm, pg, addr, hstate);
 
        if (pu) {
                pm = hpmd_alloc(mm, pu, addr, hstate);
@@ -316,13 +324,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
-#ifdef CONFIG_PPC_64K_PAGES
-               if (pud_none_or_clear_bad(pud))
-                       continue;
-               hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling,
-                                      psize);
-#else
-               if (shift == PAGE_SHIFT_64K) {
+               if (shift < PMD_SHIFT) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
@@ -332,7 +334,6 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                continue;
                        free_hugepte_range(tlb, (hugepd_t *)pud, psize);
                }
-#endif
        } while (pud++, addr = next, addr != end);
 
        start &= PGDIR_MASK;
@@ -422,9 +423,15 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                psize = get_slice_psize(tlb->mm, addr);
                BUG_ON(!mmu_huge_psizes[psize]);
                next = pgd_addr_end(addr, end);
-               if (pgd_none_or_clear_bad(pgd))
-                       continue;
-               hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+               if (mmu_psize_to_shift(psize) < PUD_SHIFT) {
+                       if (pgd_none_or_clear_bad(pgd))
+                               continue;
+                       hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+               } else {
+                       if (pgd_none(*pgd))
+                               continue;
+                       free_hugepte_range(tlb, (hugepd_t *)pgd, psize);
+               }
        } while (pgd++, addr = next, addr != end);
 }
 
@@ -730,25 +737,27 @@ static int __init hugepage_setup_sz(char *str)
 }
 __setup("hugepagesz=", hugepage_setup_sz);
 
-static void zero_ctor(struct kmem_cache *cache, void *addr)
-{
-       memset(addr, 0, kmem_cache_size(cache));
-}
-
 static int __init hugetlbpage_init(void)
 {
        unsigned int psize;
 
        if (!cpu_has_feature(CPU_FTR_16M_PAGE))
                return -ENODEV;
+
        /* Add supported huge page sizes.  Need to change HUGE_MAX_HSTATE
         * and adjust PTE_NONCACHE_NUM if the number of supported huge page
         * sizes changes.
         */
        set_huge_psize(MMU_PAGE_16M);
-       set_huge_psize(MMU_PAGE_64K);
        set_huge_psize(MMU_PAGE_16G);
 
+       /* Temporarily disable support for 64K huge pages when 64K SPU local
+        * store support is enabled as the current implementation conflicts.
+        */
+#ifndef CONFIG_SPU_FS_64K_LS
+       set_huge_psize(MMU_PAGE_64K);
+#endif
+
        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                if (mmu_huge_psizes[psize]) {
                        huge_pgtable_cache(psize) = kmem_cache_create(
@@ -756,7 +765,7 @@ static int __init hugetlbpage_init(void)
                                                HUGEPTE_TABLE_SIZE(psize),
                                                HUGEPTE_TABLE_SIZE(psize),
                                                0,
-                                               zero_ctor);
+                                               NULL);
                        if (!huge_pgtable_cache(psize))
                                panic("hugetlbpage_init(): could not create %s"\
                                      "\n", HUGEPTE_CACHE_NAME(psize));