/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing.
 *
 * Recently freed page-table pages are kept on simple quicklists so
 * they can be handed out again without a round trip through the page
 * allocator.  On SMP the lists live in the per-cpu cpudata, so
 * disabling preemption is the only locking required.
 */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists  local_cpu_data()
#else
extern struct pgtable_cache_struct {
        unsigned long *pgd_cache;
        unsigned long *pte_cache[2];
        unsigned int pgcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist           (pgt_quicklists.pgd_cache)
#define pmd_quicklist           ((unsigned long *)0)
#define pte_quicklist           (pgt_quicklists.pte_cache)
#define pgtable_cache_size      (pgt_quicklists.pgcache_size)

static inline void free_pgd_fast(pgd_t *pgd)
{
        preempt_disable();
        /* Link the page at the list head; its first word doubles as
         * the "next" pointer.
         */
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
        preempt_enable();
}

static inline pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        preempt_disable();
        if ((ret = pgd_quicklist) != NULL) {
                /* Fast path: pop the quicklist head and clear the
                 * link word before handing the page out.
                 */
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
                preempt_enable();
        } else {
                /* Slow path: fall back to the page allocator. */
                preempt_enable();
                ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if (ret)
                        memset(ret, 0, PAGE_SIZE);
        }
        return (pgd_t *)ret;
}
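
/*
 * The two helpers above implement an intrusive freelist: the first
 * word of every cached page doubles as the "next" pointer, so no
 * separate list node is needed.  A minimal sketch of the same
 * push/pop pattern, with hypothetical names that are not part of
 * this header:
 *
 *      static unsigned long *freelist;
 *
 *      static void fl_push(void *page)
 *      {
 *              *(unsigned long *)page = (unsigned long) freelist;
 *              freelist = page;
 *      }
 *
 *      static void *fl_pop(void)
 *      {
 *              unsigned long *ret = freelist;
 *
 *              if (ret) {
 *                      freelist = (unsigned long *) *ret;
 *                      ret[0] = 0;     // scrub the link before reuse
 *              }
 *              return ret;
 *      }
 */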

static inline void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

/* XXX This crap can die, no longer using virtual page tables... */
#ifdef DCACHE_ALIASING_POSSIBLE
#define VPTE_COLOR(address)             (((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)           (((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)             0
#define DCACHE_COLOR(address)           0
#endif
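
/*
 * DCACHE_COLOR() selects one of two cache colors from a page's
 * address, so adjacent pages alternate colors.  For example, with
 * sparc64's 8K base pages (PAGE_SHIFT == 13):
 *
 *      DCACHE_COLOR(0x2000) == 1
 *      DCACHE_COLOR(0x4000) == 0
 *
 * VPTE_COLOR() does the same for an address's slot in the (old)
 * virtual page table region.  Keeping one quicklist per color lets
 * the allocator hand back a page whose color matches where it will
 * be used, avoiding D-cache aliasing.
 */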

#define pud_populate(MM, PUD, PMD)      pud_set(PUD, PMD)

static inline pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret;
        int color = 0;

        preempt_disable();
        /* Prefer color 0, but take a page of the other color rather
         * than failing outright if that list is empty.
         */
        if (pte_quicklist[color] == NULL)
                color = 1;

        if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();

        return (pmd_t *)ret;
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd;

        pmd = pmd_alloc_one_fast(mm, address);
        if (!pmd) {
                pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if (pmd)
                        memset(pmd, 0, PAGE_SIZE);
        }
        return pmd;
}
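
/*
 * pmd_alloc_one() shows the standard two-tier shape used throughout
 * this header: try the cheap per-cpu cache first, and only on a miss
 * pay for a full zeroed page from the buddy allocator.  A hedged
 * sketch of the pattern, with hypothetical names:
 *
 *      void *obj = cache_pop();                // never sleeps
 *      if (!obj)
 *              obj = alloc_zeroed_page();      // GFP_KERNEL: may sleep
 */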

static inline void free_pmd_fast(pmd_t *pmd)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pmd);

        preempt_disable();
        /* File the page on the quicklist matching its own D-cache
         * color.
         */
        *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pmd;
        pgtable_cache_size++;
        preempt_enable();
}

static inline void free_pmd_slow(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

#define pmd_populate_kernel(MM, PMD, PTE)       pmd_set(PMD, PTE)
#define pmd_populate(MM, PMD, PTE_PAGE)         \
        pmd_populate_kernel(MM, PMD, page_address(PTE_PAGE))

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, addr);

        if (pte)
                return virt_to_page(pte);

        return NULL;
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        /* Choose the quicklist color from the address this pte page
         * will serve.
         */
        unsigned long color = VPTE_COLOR(address);
        unsigned long *ret;

        preempt_disable();
        if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();
        return (pte_t *)ret;
}

static inline void free_pte_fast(pte_t *pte)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pte);

        preempt_disable();
        *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pte;
        pgtable_cache_size++;
        preempt_enable();
}

static inline void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free_kernel(pte_t *pte)
{
        free_pte_fast(pte);
}

static inline void pte_free(struct page *ptepage)
{
        free_pte_fast(page_address(ptepage));
}

#define pmd_free(pmd)           free_pmd_fast(pmd)
#define pgd_free(pgd)           free_pgd_fast(pgd)
#define pgd_alloc(mm)           get_pgd_fast()
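
/*
 * Typical lifetime as driven from generic mm code (a sketch only;
 * the real call sites live outside this header):
 *
 *      pgd_t *pgd = pgd_alloc(mm);     // pops a cached page if one exists
 *      ...
 *      pgd_free(pgd);                  // returns it to the quicklist
 */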

#endif /* _SPARC64_PGALLOC_H */