X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-s390%2Fpgtable.h;h=13c16546eff5c728d577366db2e8758201578e51;hb=1be9ab056e94e23a307b8bfaacc38403b3b5a352;hp=1a07028d575e5570648fe8bf173a02136f130408;hpb=28eb177dfa5982d132edceed891cb3885df258bb;p=linux-2.6-omap-h63xx.git diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 1a07028d575..13c16546eff 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -31,15 +31,16 @@ * the S390 page table tree. */ #ifndef __ASSEMBLY__ +#include #include #include -#include struct vm_area_struct; /* forward declaration (include/linux/mm.h) */ struct mm_struct; extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); extern void paging_init(void); +extern void vmem_map_init(void); /* * The S390 doesn't have any external MMU info: the kernel page @@ -107,16 +108,27 @@ extern char empty_zero_page[PAGE_SIZE]; * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ +extern unsigned long vmalloc_end; #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) \ & ~(VMALLOC_OFFSET-1)) +#define VMALLOC_END vmalloc_end + +/* + * We need some free virtual space to be able to do vmalloc. + * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc + * area. On a machine with 2GB memory we make sure that we + * have at least 128MB free space for vmalloc. On a machine + * with 4TB we make sure we have at least 128GB. + */ #ifndef __s390x__ -# define VMALLOC_END (0x7fffffffL) +#define VMALLOC_MIN_SIZE 0x8000000UL +#define VMALLOC_END_INIT 0x80000000UL #else /* __s390x__ */ -# define VMALLOC_END (0x40000000000L) +#define VMALLOC_MIN_SIZE 0x2000000000UL +#define VMALLOC_END_INIT 0x40000000000UL #endif /* __s390x__ */ - /* * A 31 bit pagetable entry of S390 has following format: * | PFRA | | OS | @@ -200,17 +212,48 @@ extern char empty_zero_page[PAGE_SIZE]; */ /* Hardware bits in the page table entry */ -#define _PAGE_RO 0x200 /* HW read-only */ -#define _PAGE_INVALID 0x400 /* HW invalid */ +#define _PAGE_RO 0x200 /* HW read-only bit */ +#define _PAGE_INVALID 0x400 /* HW invalid bit */ +#define _PAGE_SWT 0x001 /* SW pte type bit t */ +#define _PAGE_SWX 0x002 /* SW pte type bit x */ -/* Mask and six different types of pages. */ -#define _PAGE_TYPE_MASK 0x601 +/* Six different types of pages. */ #define _PAGE_TYPE_EMPTY 0x400 #define _PAGE_TYPE_NONE 0x401 -#define _PAGE_TYPE_SWAP 0x600 -#define _PAGE_TYPE_FILE 0x601 +#define _PAGE_TYPE_SWAP 0x403 +#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ #define _PAGE_TYPE_RO 0x200 #define _PAGE_TYPE_RW 0x000 +#define _PAGE_TYPE_EX_RO 0x202 +#define _PAGE_TYPE_EX_RW 0x002 + +/* + * PTE type bits are rather complicated. handle_pte_fault uses pte_present, + * pte_none and pte_file to find out the pte type WITHOUT holding the page + * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to + * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs + * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards. + * This change is done while holding the lock, but the intermediate step + * of a previously valid pte with the hw invalid bit set can be observed by + * handle_pte_fault. That makes it necessary that all valid pte types with + * the hw invalid bit set must be distinguishable from the four pte types + * empty, none, swap and file. 
+ * + * irxt ipte irxt + * _PAGE_TYPE_EMPTY 1000 -> 1000 + * _PAGE_TYPE_NONE 1001 -> 1001 + * _PAGE_TYPE_SWAP 1011 -> 1011 + * _PAGE_TYPE_FILE 11?1 -> 11?1 + * _PAGE_TYPE_RO 0100 -> 1100 + * _PAGE_TYPE_RW 0000 -> 1000 + * _PAGE_TYPE_EX_RO 0110 -> 1110 + * _PAGE_TYPE_EX_RW 0010 -> 1010 + * + * pte_none is true for bits combinations 1000, 1010, 1100, 1110 + * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 + * pte_file is true for bits combinations 1101, 1111 + * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. + */ #ifndef __s390x__ @@ -274,33 +317,100 @@ extern char empty_zero_page[PAGE_SIZE]; #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) #define PAGE_RO __pgprot(_PAGE_TYPE_RO) #define PAGE_RW __pgprot(_PAGE_TYPE_RW) +#define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO) +#define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW) #define PAGE_KERNEL PAGE_RW #define PAGE_COPY PAGE_RO /* - * The S390 can't do page protection for execute, and considers that the - * same are read. Also, write permissions imply read permissions. This is - * the closest we can get.. + * Dependent on the EXEC_PROTECT option s390 can do execute protection. + * Write permission always implies read permission. In theory with a + * primary/secondary page table execute only can be implemented but + * it would cost an additional bit in the pte to distinguish all the + * different pte types. To avoid that execute permission currently + * implies read permission as well. */ /*xwr*/ #define __P000 PAGE_NONE #define __P001 PAGE_RO #define __P010 PAGE_RO #define __P011 PAGE_RO -#define __P100 PAGE_RO -#define __P101 PAGE_RO -#define __P110 PAGE_RO -#define __P111 PAGE_RO +#define __P100 PAGE_EX_RO +#define __P101 PAGE_EX_RO +#define __P110 PAGE_EX_RO +#define __P111 PAGE_EX_RO #define __S000 PAGE_NONE #define __S001 PAGE_RO #define __S010 PAGE_RW #define __S011 PAGE_RW -#define __S100 PAGE_RO -#define __S101 PAGE_RO -#define __S110 PAGE_RW -#define __S111 PAGE_RW +#define __S100 PAGE_EX_RO +#define __S101 PAGE_EX_RO +#define __S110 PAGE_EX_RW +#define __S111 PAGE_EX_RW + +#ifndef __s390x__ +# define PMD_SHADOW_SHIFT 1 +# define PGD_SHADOW_SHIFT 1 +#else /* __s390x__ */ +# define PMD_SHADOW_SHIFT 2 +# define PGD_SHADOW_SHIFT 2 +#endif /* __s390x__ */ + +static inline struct page *get_shadow_page(struct page *page) +{ + if (s390_noexec && !list_empty(&page->lru)) + return virt_to_page(page->lru.next); + return NULL; +} + +static inline pte_t *get_shadow_pte(pte_t *ptep) +{ + unsigned long pteptr = (unsigned long) (ptep); + + if (s390_noexec) { + unsigned long offset = pteptr & (PAGE_SIZE - 1); + void *addr = (void *) (pteptr ^ offset); + struct page *page = virt_to_page(addr); + if (!list_empty(&page->lru)) + return (pte_t *) ((unsigned long) page->lru.next | + offset); + } + return NULL; +} + +static inline pmd_t *get_shadow_pmd(pmd_t *pmdp) +{ + unsigned long pmdptr = (unsigned long) (pmdp); + + if (s390_noexec) { + unsigned long offset = pmdptr & + ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1); + void *addr = (void *) (pmdptr ^ offset); + struct page *page = virt_to_page(addr); + if (!list_empty(&page->lru)) + return (pmd_t *) ((unsigned long) page->lru.next | + offset); + } + return NULL; +} + +static inline pgd_t *get_shadow_pgd(pgd_t *pgdp) +{ + unsigned long pgdptr = (unsigned long) (pgdp); + + if (s390_noexec) { + unsigned long offset = pgdptr & + ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1); + void *addr = (void *) (pgdptr ^ offset); + struct page *page = virt_to_page(addr); + if (!list_empty(&page->lru)) + return (pgd_t *) 
((unsigned long) page->lru.next | + offset); + } + return NULL; +} /* * Certain architectures need to do special things when PTEs @@ -309,7 +419,16 @@ extern char empty_zero_page[PAGE_SIZE]; */ static inline void set_pte(pte_t *pteptr, pte_t pteval) { + pte_t *shadow_pte = get_shadow_pte(pteptr); + *pteptr = pteval; + if (shadow_pte) { + if (!(pte_val(pteval) & _PAGE_INVALID) && + (pte_val(pteval) & _PAGE_SWX)) + pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO; + else + pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; + } } #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) @@ -365,18 +484,21 @@ static inline int pmd_bad(pmd_t pmd) static inline int pte_none(pte_t pte) { - return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_EMPTY; + return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); } static inline int pte_present(pte_t pte) { - return !(pte_val(pte) & _PAGE_INVALID) || - (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_NONE; + unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX; + return (pte_val(pte) & mask) == _PAGE_TYPE_NONE || + (!(pte_val(pte) & _PAGE_INVALID) && + !(pte_val(pte) & _PAGE_SWT)); } static inline int pte_file(pte_t pte) { - return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_FILE; + unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT; + return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; } #define pte_same(a,b) (pte_val(a) == pte_val(b)) @@ -424,7 +546,7 @@ static inline int pte_read(pte_t pte) static inline void pgd_clear(pgd_t * pgdp) { } -static inline void pmd_clear(pmd_t * pmdp) +static inline void pmd_clear_kernel(pmd_t * pmdp) { pmd_val(pmdp[0]) = _PAGE_TABLE_INV; pmd_val(pmdp[1]) = _PAGE_TABLE_INV; @@ -432,24 +554,55 @@ static inline void pmd_clear(pmd_t * pmdp) pmd_val(pmdp[3]) = _PAGE_TABLE_INV; } +static inline void pmd_clear(pmd_t * pmdp) +{ + pmd_t *shadow_pmd = get_shadow_pmd(pmdp); + + pmd_clear_kernel(pmdp); + if (shadow_pmd) + pmd_clear_kernel(shadow_pmd); +} + #else /* __s390x__ */ -static inline void pgd_clear(pgd_t * pgdp) +static inline void pgd_clear_kernel(pgd_t * pgdp) { pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; } -static inline void pmd_clear(pmd_t * pmdp) +static inline void pgd_clear(pgd_t * pgdp) +{ + pgd_t *shadow_pgd = get_shadow_pgd(pgdp); + + pgd_clear_kernel(pgdp); + if (shadow_pgd) + pgd_clear_kernel(shadow_pgd); +} + +static inline void pmd_clear_kernel(pmd_t * pmdp) { pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; } +static inline void pmd_clear(pmd_t * pmdp) +{ + pmd_t *shadow_pmd = get_shadow_pmd(pmdp); + + pmd_clear_kernel(pmdp); + if (shadow_pmd) + pmd_clear_kernel(shadow_pmd); +} + #endif /* __s390x__ */ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { + pte_t *shadow_pte = get_shadow_pte(ptep); + pte_val(*ptep) = _PAGE_TYPE_EMPTY; + if (shadow_pte) + pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; } /* @@ -554,9 +707,10 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) /* ipte in zarch mode can do the math */ pte_t *pto = ptep; #endif - asm volatile ("ipte %2,%3" - : "=m" (*ptep) : "m" (*ptep), - "a" (pto), "a" (address) ); + asm volatile( + " ipte %2,%3" + : "=m" (*ptep) : "m" (*ptep), + "a" (pto), "a" (address)); } pte_val(*ptep) = _PAGE_TYPE_EMPTY; } @@ -566,8 +720,11 @@ ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { pte_t pte = *ptep; + pte_t *shadow_pte = get_shadow_pte(ptep); __ptep_ipte(address, ptep); + if (shadow_pte) + __ptep_ipte(address, 
shadow_pte); return pte; } @@ -596,30 +753,31 @@ ptep_establish(struct vm_area_struct *vma, * should therefore only be called if it is not mapped in any * address space. */ -#define page_test_and_clear_dirty(_page) \ -({ \ - struct page *__page = (_page); \ - unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \ - int __skey = page_get_storage_key(__physpage); \ - if (__skey & _PAGE_CHANGED) \ - page_set_storage_key(__physpage, __skey & ~_PAGE_CHANGED);\ - (__skey & _PAGE_CHANGED); \ -}) +static inline int page_test_and_clear_dirty(struct page *page) +{ + unsigned long physpage = page_to_phys(page); + int skey = page_get_storage_key(physpage); + + if (skey & _PAGE_CHANGED) + page_set_storage_key(physpage, skey & ~_PAGE_CHANGED); + return skey & _PAGE_CHANGED; +} /* * Test and clear referenced bit in storage key. */ -#define page_test_and_clear_young(page) \ -({ \ - struct page *__page = (page); \ - unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \ - int __ccode; \ - asm volatile ("rrbe 0,%1\n\t" \ - "ipm %0\n\t" \ - "srl %0,28\n\t" \ - : "=d" (__ccode) : "a" (__physpage) : "cc" ); \ - (__ccode & 2); \ -}) +static inline int page_test_and_clear_young(struct page *page) +{ + unsigned long physpage = page_to_phys(page); + int ccode; + + asm volatile( + " rrbe 0,%1\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (ccode) : "a" (physpage) : "cc" ); + return ccode & 2; +} /* * Conversion functions: convert a page and protection to a page entry, @@ -632,43 +790,41 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) return __pte; } -#define mk_pte(pg, pgprot) \ -({ \ - struct page *__page = (pg); \ - pgprot_t __pgprot = (pgprot); \ - unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \ - pte_t __pte = mk_pte_phys(__physpage, __pgprot); \ - __pte; \ -}) +static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) +{ + unsigned long physpage = page_to_phys(page); + + return mk_pte_phys(physpage, pgprot); +} -#define pfn_pte(pfn, pgprot) \ -({ \ - pgprot_t __pgprot = (pgprot); \ - unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \ - pte_t __pte = mk_pte_phys(__physpage, __pgprot); \ - __pte; \ -}) +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) +{ + unsigned long physpage = __pa((pfn) << PAGE_SHIFT); + + return mk_pte_phys(physpage, pgprot); +} #ifdef __s390x__ -#define pfn_pmd(pfn, pgprot) \ -({ \ - pgprot_t __pgprot = (pgprot); \ - unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \ - pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot)); \ - __pmd; \ -}) +static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot) +{ + unsigned long physpage = __pa((pfn) << PAGE_SHIFT); + + return __pmd(physpage + pgprot_val(pgprot)); +} #endif /* __s390x__ */ #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) #define pte_page(x) pfn_to_page(pte_pfn(x)) -#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK) +#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) + +#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) -#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT)) +#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK) -#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK) +#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) /* to find an entry in a page-table-directory */ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) @@ -690,14 +846,14 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) /* Find an entry in the second-level page table.. 
*/ #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) #define pmd_offset(dir,addr) \ - ((pmd_t *) pgd_page_kernel(*(dir)) + pmd_index(addr)) + ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr)) #endif /* __s390x__ */ /* Find an entry in the third-level page table.. */ #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) #define pte_offset_kernel(pmd, address) \ - ((pte_t *) pmd_page_kernel(*(pmd)) + pte_index(address)) + ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address)) #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) #define pte_unmap(pte) do { } while (0) @@ -776,11 +932,17 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define kern_addr_valid(addr) (1) +extern int add_shared_memory(unsigned long start, unsigned long size); +extern int remove_shared_memory(unsigned long start, unsigned long size); + /* * No page table caches to initialise */ #define pgtable_cache_init() do { } while (0) +#define __HAVE_ARCH_MEMMAP_INIT +extern void memmap_init(unsigned long, int, unsigned long, unsigned long); + #define __HAVE_ARCH_PTEP_ESTABLISH #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
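
Note (not part of the patch): the software PTE type encoding introduced above is easiest to check with a small user-space model. The sketch below is illustrative only -- it copies the bit values and the pte_none()/pte_present()/pte_file() tests from this diff into plain C, with pte_t modelled as an unsigned long and no kernel headers, and prints the "irxt" table from the comment for the eight page types.

/*
 * Illustrative user-space sketch, not kernel code: reproduces the
 * pte_none/pte_present/pte_file predicates from this patch so the
 * "irxt" bit table in the comment can be verified.
 */
#include <stdio.h>

#define _PAGE_SWT	0x001	/* SW pte type bit t */
#define _PAGE_SWX	0x002	/* SW pte type bit x */
#define _PAGE_RO	0x200	/* HW read-only bit */
#define _PAGE_INVALID	0x400	/* HW invalid bit */

#define _PAGE_TYPE_NONE	0x401
#define _PAGE_TYPE_FILE	0x601

/* same logic as the patched pte_none() */
static int pte_none(unsigned long pte)
{
	return (pte & _PAGE_INVALID) && !(pte & _PAGE_SWT);
}

/* same logic as the patched pte_present() */
static int pte_present(unsigned long pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;

	return (pte & mask) == _PAGE_TYPE_NONE ||
	       (!(pte & _PAGE_INVALID) && !(pte & _PAGE_SWT));
}

/* same logic as the patched pte_file() */
static int pte_file(unsigned long pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;

	return (pte & mask) == _PAGE_TYPE_FILE;
}

int main(void)
{
	static const struct { const char *name; unsigned long val; } t[] = {
		{ "EMPTY", 0x400 }, { "NONE",  0x401 },
		{ "SWAP",  0x403 }, { "FILE",  0x601 },
		{ "RO",    0x200 }, { "RW",    0x000 },
		{ "EX_RO", 0x202 }, { "EX_RW", 0x002 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		printf("%-6s irxt=%d%d%d%d none=%d present=%d file=%d\n",
		       t[i].name,
		       !!(t[i].val & _PAGE_INVALID),
		       !!(t[i].val & _PAGE_RO),
		       !!(t[i].val & _PAGE_SWX),
		       !!(t[i].val & _PAGE_SWT),
		       pte_none(t[i].val),
		       pte_present(t[i].val),
		       pte_file(t[i].val));
	return 0;
}

Running it reproduces the table in the patch comment: EMPTY and the invalidated RW/RO/EX_* values satisfy pte_none, NONE and the valid types satisfy pte_present, only the FILE encodings satisfy pte_file, and the SWAP encoding satisfies none of the three.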
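
A second sketch, under the same assumptions (plain unsigned longs instead of pte_t, no s390_noexec or struct page machinery), shows the rule the patched set_pte() applies to a shadow page table entry: a valid pte carrying the _PAGE_SWX execute bit is mirrored into the shadow table with _PAGE_RO forced, anything else leaves the shadow entry as _PAGE_TYPE_EMPTY. The helper name update_shadow_pte and the pfn_bits value are hypothetical, chosen only for the illustration.

/*
 * Illustrative user-space sketch of the shadow-pte update rule from the
 * patched set_pte() when a shadow page table exists (s390_noexec case).
 * Not kernel code; pte_t is modelled as a plain unsigned long.
 */
#include <stdio.h>

#define _PAGE_SWX	 0x002
#define _PAGE_RO	 0x200
#define _PAGE_INVALID	 0x400
#define _PAGE_TYPE_EMPTY 0x400

/* hypothetical helper mirroring the shadow update in the patched set_pte() */
static void update_shadow_pte(unsigned long pteval, unsigned long *shadow_pte)
{
	if (!(pteval & _PAGE_INVALID) && (pteval & _PAGE_SWX))
		/* executable mapping: kept in the shadow table, read-only */
		*shadow_pte = pteval | _PAGE_RO;
	else
		/* everything else is absent from the shadow table */
		*shadow_pte = _PAGE_TYPE_EMPTY;
}

int main(void)
{
	unsigned long pfn_bits = 0x12345000UL;	/* hypothetical page frame bits */
	unsigned long shadow;

	update_shadow_pte(pfn_bits | 0x002, &shadow);	/* _PAGE_TYPE_EX_RW */
	printf("EX_RW -> shadow %#lx\n", shadow);	/* mirrored, RO forced */

	update_shadow_pte(pfn_bits | 0x000, &shadow);	/* _PAGE_TYPE_RW */
	printf("RW    -> shadow %#lx\n", shadow);	/* _PAGE_TYPE_EMPTY */
	return 0;
}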