X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-ia64%2Fpgtable.h;h=c0f8144f23497ae0f95804148f19206b170393c8;hb=36ddf5bbdea7ba4582abc62f106f0f0e9f0b6b91;hp=21e32a06bc82c110e4870358b0d5e913a588ae1a;hpb=c7fb577e2a6cb04732541f2dc402bd46747f7558;p=linux-2.6-omap-h63xx.git

diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 21e32a06bc8..c0f8144f234 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -84,32 +84,55 @@
 #define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED
 
 /*
- * Definitions for first level:
- *
- * PGDIR_SHIFT determines what a first-level page table entry can map.
+ * How many pointers will a page table level hold expressed in shift
  */
-#define PGDIR_SHIFT		(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
-#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
-#define PGDIR_MASK		(~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD		(1UL << (PAGE_SHIFT-3))
-#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS	0
+#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)
 
 /*
- * Definitions for second level:
+ * Definitions for fourth level:
+ */
+#define	PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for third level:
  *
- * PMD_SHIFT determines the size of the area a second-level page table
+ * PMD_SHIFT determines the size of the area a third-level page table
  * can map.
  */
-#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
+#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
-#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))
 
+#ifdef CONFIG_PGTABLE_4
 /*
- * Definitions for third level:
+ * Definitions for second level:
+ *
+ * PUD_SHIFT determines the size of the area a second-level page table
+ * can map.
  */
-#define PTRS_PER_PTE	(__IA64_UL(1) << (PAGE_SHIFT-3))
+#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
+#endif
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
+ */
+#ifdef CONFIG_PGTABLE_4
+#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#else
+#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#endif
+#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK		(~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
+#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
+#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
+#define FIRST_USER_ADDRESS	0
 
 /*
  * All the normal masks have the "page accessed" bits on, as any time
@@ -127,6 +150,7 @@
 
 # ifndef __ASSEMBLY__
 
+#include <linux/sched.h>	/* for mm_struct */
 #include <asm/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
@@ -160,6 +184,9 @@
 #define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
 
 #define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#ifdef CONFIG_PGTABLE_4
+#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
 #define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 
@@ -217,6 +244,9 @@ ia64_phys_addr_valid (unsigned long addr)
 #define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
 #define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
 
+#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
+#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */
+
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
  * table entry (pte).
@@ -253,9 +283,16 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
 #define pud_present(pud)		(pud_val(pud) != 0UL)
 #define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
-
 #define pud_page(pud)			((unsigned long) __va(pud_val(pud) & _PFN_MASK))
 
+#ifdef CONFIG_PGTABLE_4
+#define pgd_none(pgd)			(!pgd_val(pgd))
+#define pgd_bad(pgd)			(!ia64_phys_addr_valid(pgd_val(pgd)))
+#define pgd_present(pgd)		(pgd_val(pgd) != 0UL)
+#define pgd_clear(pgdp)			(pgd_val(*(pgdp)) = 0UL)
+#define pgd_page(pgd)			((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+#endif
+
 /*
  * The following have defined behavior only work if pte_present() is true.
  */
@@ -277,7 +314,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
 #define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
 #define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
-#define pte_mkhuge(pte)	(__pte(pte_val(pte) | _PAGE_P))
+#define pte_mkhuge(pte)	(__pte(pte_val(pte)))
 
 /*
  * Macro to a page protection value as "uncacheable".  Note that "protection" is really a
@@ -323,7 +360,13 @@ pgd_offset (struct mm_struct *mm, unsigned long address)
    here.  */
 #define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)
 
+#ifdef CONFIG_PGTABLE_4
 /* Find an entry in the second-level page table.. */
+#define pud_offset(dir,addr) \
+	((pud_t *) pgd_page(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+#endif
+
+/* Find an entry in the third-level page table.. */
 #define pmd_offset(dir,addr) \
 	((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
@@ -462,9 +505,6 @@ extern struct page *zero_page_memmap_ptr;
 #define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
 #define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
 #define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
-struct mmu_gather;
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
-		unsigned long end, unsigned long floor, unsigned long ceiling);
 #endif
 
 /*
@@ -556,7 +596,9 @@ do { \
 #define __HAVE_ARCH_PGD_OFFSET_GATE
 #define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
 
+#ifndef CONFIG_PGTABLE_4
 #include <asm-generic/pgtable-nopud.h>
+#endif
 #include <asm-generic/pgtable.h>
 
 #endif /* _ASM_IA64_PGTABLE_H */
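
The arithmetic behind the new shift macros is easier to see with concrete numbers. The small user-space sketch below is not part of the patch; it simply mirrors the definitions, assuming PAGE_SHIFT = 14 (16KB pages, one common ia64 configuration), and every name in it is local to the example. With 8-byte pointers a page-table page holds 2^(PAGE_SHIFT-3) = 2048 entries, so each level contributes 11 address bits: one PMD entry maps 32MB, one PUD entry 64GB, and one PGD entry 2^36 bytes with three levels or 2^47 bytes with CONFIG_PGTABLE_4. The "- 3" in RGN_MAP_SHIFT reflects that the top three address bits select the region, so only PTRS_PER_PGD/8 first-level entries are usable within a single region.

/*
 * Illustration only, not kernel code: compute the per-level coverage and the
 * per-region mapping limit for an assumed PAGE_SHIFT of 14 (16KB pages).
 */
#include <stdio.h>

#define PAGE_SHIFT		14			/* assumed: 16KB pages */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT - 3)	/* 11 -> 2048 entries per level */

int main(void)
{
	unsigned long pmd_shift = PAGE_SHIFT + PTRS_PER_PTD_SHIFT;	/* 25 */
	unsigned long pud_shift = pmd_shift + PTRS_PER_PTD_SHIFT;	/* 36 */
	unsigned long pgdir3    = pmd_shift + PTRS_PER_PTD_SHIFT;	/* 36: !CONFIG_PGTABLE_4 */
	unsigned long pgdir4    = pud_shift + PTRS_PER_PTD_SHIFT;	/* 47: CONFIG_PGTABLE_4 */

	printf("PMD entry maps        %lu MB\n", (1UL << pmd_shift) >> 20);	/* 32 MB  */
	printf("PUD entry maps        %lu GB\n", (1UL << pud_shift) >> 30);	/* 64 GB  */
	printf("PGD entry (3 levels)  %lu GB\n", (1UL << pgdir3) >> 30);	/* 64 GB  */
	printf("PGD entry (4 levels)  %lu TB\n", (1UL << pgdir4) >> 40);	/* 128 TB */

	/* RGN_MAP_SHIFT = PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3 */
	printf("per-region limit (3 levels) = 2^%lu bytes\n",
	       pgdir3 + PTRS_PER_PTD_SHIFT - 3);	/* 2^44 = 16 TB per region */
	printf("per-region limit (4 levels) = 2^%lu bytes\n",
	       pgdir4 + PTRS_PER_PTD_SHIFT - 3);	/* 2^55 per region */
	return 0;
}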
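The new pud_offset() follows the same pattern as pmd_offset(): shift the address right by the level's shift and mask with the number of entries at that level. The second sketch below, again user space only and under the same 16KB-page and CONFIG_PGTABLE_4 assumptions, shows how one virtual address decomposes into pgd/pud/pmd/pte indices plus a page offset. The real ia64 pgd_offset() additionally folds the region bits from the top of the address into the first-level index, which this simplified example ignores.

/* Illustration only: per-level index extraction for an example address. */
#include <stdio.h>

#define PAGE_SHIFT		14				/* assumed: 16KB pages */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT - 3)
#define PMD_SHIFT		(PAGE_SHIFT + PTRS_PER_PTD_SHIFT)
#define PUD_SHIFT		(PMD_SHIFT + PTRS_PER_PTD_SHIFT)
#define PGDIR_SHIFT		(PUD_SHIFT + PTRS_PER_PTD_SHIFT)	/* 4-level layout */
#define PTRS_PER_PTD		(1UL << PTRS_PER_PTD_SHIFT)

int main(void)
{
	unsigned long addr = 0x2000000012345678UL;	/* arbitrary example address */

	/* Same masking as the pud_offset()/pmd_offset() macros in the patch. */
	unsigned long pgd_idx = (addr >> PGDIR_SHIFT) & (PTRS_PER_PTD - 1);
	unsigned long pud_idx = (addr >> PUD_SHIFT)   & (PTRS_PER_PTD - 1);
	unsigned long pmd_idx = (addr >> PMD_SHIFT)   & (PTRS_PER_PTD - 1);
	unsigned long pte_idx = (addr >> PAGE_SHIFT)  & (PTRS_PER_PTD - 1);

	printf("pgd %lu, pud %lu, pmd %lu, pte %lu, page offset 0x%lx\n",
	       pgd_idx, pud_idx, pmd_idx, pte_idx,
	       addr & ((1UL << PAGE_SHIFT) - 1));
	return 0;
}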