/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

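/* swapper_tsb is the statically sized TSB used for kernel TLB misses; it
 * lives in the kernel image rather than being allocated per address space.
 */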
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

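/* A TSB is a direct-mapped table of translations: the index is taken from
 * the virtual page number and the stored tag is the virtual address
 * shifted down by 22 bits.
 */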
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}

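/* Flush the given user virtual addresses from this address space's TSB.
 * mm->context.lock keeps the TSB pointer and size stable against a
 * concurrent tsb_grow().
 */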
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;
	struct tsb *tsb;
	int i;

	spin_lock_irqsave(&mm->context.lock, flags);

	tsb = mm->context.tsb;
	nentries = mm->context.tsb_nentries;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

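/* Compute the TSB register value, the locked-mapping PTE (used when the
 * TSB is not referenced by its physical address), and the hypervisor
 * descriptor for a TSB of tsb_bytes bytes.
 */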
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	/* 16K, 32K and 64K TSBs (tsb_reg 0x1, 0x2, 0x3) map with a 64K page. */
	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;
	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;
	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;
	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;
	default:
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB.  */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor.  */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;
		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;
		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;
		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		}
		hp->num_ttes = tsb_bytes / 16;
		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;
		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;
		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;
		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		}
		hp->tsb_base = tsb_paddr;
	}
}

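/* One kmem cache per supported TSB size (8K << 0 through 8K << 7), sized
 * and aligned so that every TSB is naturally aligned to its own size.
 */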
static kmem_cache_t *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init tsb_cache_init(void)
{
	unsigned long i;

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  SLAB_HWCACHE_ALIGN |
						  SLAB_MUST_HWCACHE_ALIGN,
						  NULL, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * do_sparc64_fault() invokes this routine to try to grow the TSB.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
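/* For example: TSB entries are 16 bytes, so an 8K TSB holds 512 of them
 * and its grow trigger fires once the address space reaches 384 resident
 * pages.
 */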
void tsb_grow(struct mm_struct *mm, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		unsigned long n_entries = new_size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;

		new_cache_index++;
	}

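	/* new_size is now the smallest TSB size whose 3/4-full mark exceeds
	 * rss (capped at max_tsb_size), and new_cache_index selects the
	 * matching kmem cache.
	 */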
	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;
	else
		new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb == NULL && new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb != NULL)
			mm->context.tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid.  */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so watch out!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB; this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB, page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(); this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb;
	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

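	/* Publish the new TSB and recompute the TSB register value, mapping
	 * and hypervisor descriptor to match its new size.
	 */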
	mm->context.tsb = new_tsb;
	setup_tsb_params(mm, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu.  */
		tsb_context_switch(mm);

		/* Now force other processors to do the same.  */
		smp_tsb_sync(mm);

		/* Now it is safe to free the old tsb.  */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;

	/* If this is fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, get_mm_rss(mm));

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

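/* Free the TSB and release this address space's MMU context number. */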
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, cache_index;

	cache_index = (mm->context.tsb_reg_val & 0x7UL);
	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}