/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
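/* 0x100000000UL >> SID_SHIFT gives one low area per 256MB segment
 * below 4GB (16 of them, SID_SHIFT being 28); the high areas carve
 * the remainder of the pagetable range into HTLB_AREA_SIZE chunks. */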
#ifdef CONFIG_PPC_64K_PAGES
#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT-HPAGE_SHIFT)
#else
#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT-HPAGE_SHIFT)
#endif
#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)

#define HUGEPD_SHIFT		(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE-1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])
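/* Hugepte tables come from their own slot in the common pgtable_cache[]
 * array, so the generic pagetable freeing machinery can identify them
 * by cache number; the cache itself is created in hugetlbpage_init()
 * at the bottom of this file. */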
/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (!new)
		return -ENOMEM;

	/* Another thread may have populated the hugepd while we were
	 * allocating; only install ours if the slot is still empty. */
	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
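/* The walk helpers below treat the bottom level specially: instead of
 * pointing at a normal lower-level page table, the relevant directory
 * entry is reinterpreted as a hugepd_t pointing at a table of huge
 * PTEs, one per HPAGE_SIZE chunk of the HUGEPD_SIZE region. */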
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
#ifdef CONFIG_PPC_64K_PAGES
			pmd_t *pm = pmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
#else
			return hugepte_offset((hugepd_t *)pu, addr);
#endif
		}
	}

	return NULL;
}
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	hugepd_t *hpdp = NULL;

	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);
	if (pu) {
#ifdef CONFIG_PPC_64K_PAGES
		pmd_t *pm = pmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
#else
		hpdp = (hugepd_t *)pu;
#endif
	}

	if (!hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}
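/* Note the asymmetry above: with a 64K base page size the hugepd
 * replaces a PMD entry, otherwise it replaces a PUD entry.  Either
 * way HUGEPD_SHIFT works out to the shift of the level being
 * replaced, so one hugepte table spans exactly one directory slot. */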
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	/* Huge PMD sharing is not used on this architecture */
	return 0;
}
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}
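/* The hugepte table is not freed immediately: pgtable_free_tlb()
 * batches it on the mmu_gather so the actual free happens only after
 * the TLB flush, protecting concurrent lock-free walkers that may
 * still be traversing the old table. */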
#ifdef CONFIG_PPC_64K_PAGES
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
#endif
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (pud_none(*pud))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pud);
#endif
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;
	/*
	 * Comments below are taken from the normal free_pgd_range().
	 * They apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below?  no, end can't go down to
	 * 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= HUGEPD_MASK;
	if (addr < floor) {
		addr += HUGEPD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;
	if (addr > end - 1)
		return;
	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_need_flush (huge / !huge).  Might not be
		 * necessary anymore if we make hpte_need_flush() get the
		 * page size from the slices.
		 */
		pte_update(mm, addr & HPAGE_MASK, ptep, ~0UL, 1);
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
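/* The trailing '1' passed to pte_update() above and below is its
 * "huge" argument, so the flush code invalidates a huge-page hash
 * entry rather than a base-page one.  _PAGE_HPTEFLAGS is masked out
 * because the new PTE has no hash-table slot cached in it yet. */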
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (get_slice_psize(mm, address) != mmu_huge_psize)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       mmu_huge_psize, 1, 0);
}
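/* Placement policy is delegated entirely to the slice layer, which
 * tracks the page size of each slice of the address space and will
 * only hand back ranges whose slices are (or can be converted to)
 * the huge page size. */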
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			/* instruction fault: flush each base page */
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));
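	/* _PAGE_BUSY acts as a per-PTE lock taken by the cmpxchg loop
	 * above: if another CPU is already hashing this PTE we simply
	 * bail out and let the fault be retried.  The final PTE
	 * write-back at the bottom of this function drops the bit. */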
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);
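	/* In the pp encoding used here, 0x2 grants user access and the
	 * low bit (set when _PAGE_RW is clear) makes the HPTE read-only;
	 * writes then fault so DIRTY can be tracked in software, as the
	 * "case 2" comment above describes. */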
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize);
			if (slot == -1) {
				/* Both groups full: evict an entry
				 * and retry */
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}
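	/* The slot number handed back by hpte_insert encodes the group
	 * index and the primary/secondary choice; caching it in the PTE
	 * via _PAGE_F_GIX/_PAGE_F_SECOND is what lets the updatepp path
	 * above find an existing HPTE again without a search. */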
	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
	memset(addr, 0, kmem_cache_size(cache));
}
static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       0,
					       zero_ctor, NULL);
	if (!huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);
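/*
 * Illustrative usage (not part of this file): to exercise these paths
 * from userspace, reserve some huge pages and map a hugetlbfs file,
 * for example
 *
 *	echo 20 > /proc/sys/vm/nr_hugepages
 *	mount -t hugetlbfs none /mnt/huge
 *
 * mmap() of a file created under the mount point is then backed by
 * 16MB pages, and hash faults on it land in hash_huge_page() above.
 */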