/* $Id: mmu_context.h,v 1.54 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

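/* Context-allocation state shared with get_new_mmu_context(),
 * defined in the sparc64 mm code: tlb_context_cache holds the most
 * recently handed-out context value plus the version generation,
 * mmu_context_bmap tracks which hardware context numbers are live,
 * and ctx_alloc_lock is believed to guard both.
 */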
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

extern void get_new_mmu_context(struct mm_struct *mm);
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

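/* A context value appears to encode both a hardware context number
 * and a version generation: CTX_VALID() compares the version bits
 * against tlb_context_cache, so bumping the generation inside
 * get_new_mmu_context() when the number space wraps cheaply
 * invalidates every mm's context at once.
 */
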
extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
				 unsigned long tsb_vaddr, unsigned long tsb_pte);

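/* tsb_context_switch() hands the low-level code everything needed
 * to service TLB misses out of this mm's TSB: the physical address
 * of the page tables, the TSB register value, and the virtual
 * address and PTE that map the TSB itself.  This reading of the
 * tsb_* fields below is a best-effort interpretation.
 */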
static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
			     mm->context.tsb_map_vaddr,
			     mm->context.tsb_map_pte);
}

extern void tsb_grow(struct mm_struct *mm, unsigned long mm_rss, gfp_t gfp_flags);

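/* tsb_grow() is presumably invoked as RSS grows, to replace the
 * current TSB with one sized for mm_rss (allocated with gfp_flags);
 * smp_tsb_sync() below then lets other cpus running this mm pick
 * up the new TSB.
 */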
#ifdef CONFIG_SMP
extern void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
	__asm__ __volatile__( \
	"\n661:	stxa		%0, [%1] %2\n" \
	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
	"	.word		661b\n" \
	"	stxa		%0, [%1] %3\n" \
	"	.previous\n" \
	"	flush		%%g6\n" \
	: /* No outputs */ \
	: "r" (CTX_HWBITS((__mm)->context)), \
	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

extern void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid;
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);
	spin_unlock(&mm->page_table_lock);

	if (!ctx_valid || (old_mm != mm)) {
		load_secondary_context(mm);
		tsb_context_switch(mm);
	}

	/* Even if (mm == old_mm) we _must_ check
	 * the cpu_vm_mask.  If we do not we could
	 * corrupt the TLB state because of how
	 * smp_flush_tlb_{page,range,mm} on sparc64
	 * and lazy tlb switches work. -DaveM
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
}

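/* Roughly: a cpu not yet in mm->cpu_vm_mask has been skipped by
 * any smp_flush_tlb_*() cross calls for this mm and may hold stale
 * entries under this context number, so the whole context is
 * flushed once as the cpu's mask bit is first set.
 */
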
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	int cpu;

	/* Note: page_table_lock is used here to serialize switch_mm
	 * and activate_mm, and their calls to get_new_mmu_context.
	 * This use of page_table_lock is unrelated to its other uses.
	 */
	spin_lock(&mm->page_table_lock);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);
	spin_unlock(&mm->page_table_lock);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(mm);
}

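/* Note that unlike switch_mm(), this loads the context and flushes
 * it unconditionally; activate_mm() runs on paths such as exec
 * where, as far as this reading goes, the mm is brand new on this
 * cpu and there is no old state worth preserving.
 */
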
#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */