/*  $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 *  arch/sparc64/mm/init.c
 *
 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>

extern void device_scan(void);

#define MAX_PHYS_ADDRESS        (1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ    (256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES       \
        ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
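
/* Worked example: with MAX_PHYS_ADDRESS = 2^42 and 256MB = 2^28 byte
 * chunks, there are 2^42 / 2^28 = 16384 chunks, one bit each, giving
 * 16384 / 8 = 2048 bytes (256 unsigned longs) for the bitmap below.
 */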

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

/* A special kernel TSB for 4MB and 256MB linear mappings.  */
struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];

#define MAX_BANKS       32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int pavail_rescan_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
        const struct linux_prom64_registers *x = a, *y = b;

        if (x->phys_addr > y->phys_addr)
                return 1;
        if (x->phys_addr < y->phys_addr)
                return -1;
        return 0;
}

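/* Pull a memory layout property ("reg" or "available") out of the OBP
 * /memory node, page align each (base, size) pair, and sort the banks
 * by ascending physical address so that later scans can assume
 * ordering.
 */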
static void __init read_obp_memory(const char *property,
                                   struct linux_prom64_registers *regs,
                                   int *num_ents)
{
        int node = prom_finddevice("/memory");
        int prop_size = prom_getproplen(node, property);
        int ents, ret, i;

        ents = prop_size / sizeof(struct linux_prom64_registers);
        if (ents > MAX_BANKS) {
                prom_printf("The machine has more %s property entries than "
                            "this kernel can support (%d).\n",
                            property, MAX_BANKS);
                prom_halt();
        }

        ret = prom_getproperty(node, property, (char *) regs, prop_size);
        if (ret == -1) {
                prom_printf("Couldn't get %s property from /memory.\n",
                            property);
                prom_halt();
        }

        *num_ents = ents;

        /* Sanitize what we got from the firmware, by page aligning
         * everything.
         */
        for (i = 0; i < ents; i++) {
                unsigned long base, size;

                base = regs[i].phys_addr;
                size = regs[i].reg_size;

                size &= PAGE_MASK;
                if (base & ~PAGE_MASK) {
                        unsigned long new_base = PAGE_ALIGN(base);

                        size -= new_base - base;
                        if ((long) size < 0L)
                                size = 0UL;
                        base = new_base;
                }
                regs[i].phys_addr = base;
                regs[i].reg_size = size;
        }
        sort(regs, ents, sizeof(struct linux_prom64_registers),
             cmp_p64, NULL);
}

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];

/* References to special section boundaries */
extern char  _start[], _end[];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int bigkernel = 0;

kmem_cache_t *pgtable_cache __read_mostly;

static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
{
        clear_page(addr);
}

void pgtable_cache_init(void)
{
        pgtable_cache = kmem_cache_create("pgtable_cache",
                                          PAGE_SIZE, PAGE_SIZE,
                                          SLAB_HWCACHE_ALIGN |
                                          SLAB_MUST_HWCACHE_ALIGN,
                                          zero_ctor,
                                          NULL);
        if (!pgtable_cache) {
                prom_printf("pgtable_cache_init(): Could not create!\n");
                prom_halt();
        }
}

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

inline void flush_dcache_page_impl(struct page *page)
{
        BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
        atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
#else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
                __flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty         PG_arch_1
#define PG_dcache_cpu_shift     24UL
#define PG_dcache_cpu_mask      (256UL - 1UL)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif

#define dcache_dirty_cpu(page) \
        (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

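/* Atomically record in page->flags both the D-cache dirty bit and the
 * ID of the cpu whose D-cache holds the dirty lines.  The casx
 * compare-and-swap loop below retries until the flags word is updated
 * without a racing modification slipping in between the load and the
 * store.
 */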
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
        unsigned long mask = this_cpu;
        unsigned long non_cpu_bits;

        non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
        mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

        __asm__ __volatile__("1:\n\t"
                             "ldx       [%2], %%g7\n\t"
                             "and       %%g7, %1, %%g1\n\t"
                             "or        %%g1, %0, %%g1\n\t"
                             "casx      [%2], %%g7, %%g1\n\t"
                             "cmp       %%g7, %%g1\n\t"
                             "membar    #StoreLoad | #StoreStore\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop"
                             : /* no outputs */
                             : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
                             : "g1", "g7");
}

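/* Clear the dirty bit, but only if the cpu recorded in page->flags is
 * still 'cpu'.  If another cpu has re-dirtied the page in the
 * meantime, its flush is still pending, so leave the flags alone
 * (the bne to label 2 below).
 */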
static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
        unsigned long mask = (1UL << PG_dcache_dirty);

        __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
                             "1:\n\t"
                             "ldx       [%2], %%g7\n\t"
                             "srlx      %%g7, %4, %%g1\n\t"
                             "and       %%g1, %3, %%g1\n\t"
                             "cmp       %%g1, %0\n\t"
                             "bne,pn    %%icc, 2f\n\t"
                             " andn     %%g7, %1, %%g1\n\t"
                             "casx      [%2], %%g7, %%g1\n\t"
                             "cmp       %%g7, %%g1\n\t"
                             "membar    #StoreLoad | #StoreStore\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
                             " nop\n"
                             "2:"
                             : /* no outputs */
                             : "r" (cpu), "r" (mask), "r" (&page->flags),
                               "i" (PG_dcache_cpu_mask),
                               "i" (PG_dcache_cpu_shift)
                             : "g1", "g7");
}

static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
        unsigned long tsb_addr = (unsigned long) ent;

        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                tsb_addr = __pa(tsb_addr);

        __tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

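/* Called after a PTE has been installed: resolve any pending lazy
 * D-cache flush for the backing page, then preload the translation
 * into this mm's TSB so the next TLB miss on 'address' is serviced
 * without a page table walk.
 */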
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        struct mm_struct *mm;
        struct tsb *tsb;
        unsigned long tag;

        if (tlb_type != hypervisor) {
                unsigned long pfn = pte_pfn(pte);
                unsigned long pg_flags;
                struct page *page;

                if (pfn_valid(pfn) &&
                    (page = pfn_to_page(pfn), page_mapping(page)) &&
                    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
                        int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
                                   PG_dcache_cpu_mask);
                        int this_cpu = get_cpu();

                        /* This is just to optimize away some function calls
                         * in the SMP case.
                         */
                        if (cpu == this_cpu)
                                flush_dcache_page_impl(page);
                        else
                                smp_flush_dcache_page_impl(page, cpu);

                        clear_dcache_dirty_cpu(page, cpu);

                        put_cpu();
                }
        }

        mm = vma->vm_mm;
        tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
                               (mm->context.tsb_nentries - 1UL)];
        tag = (address >> 22UL);
        tsb_insert(tsb, tag, pte_val(pte));
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;
        int this_cpu;

        if (tlb_type == hypervisor)
                return;

        /* Do not bother with the expensive D-cache flush if it
         * is merely the zero page.  The 'bigcore' testcase in GDB
         * causes this case to run millions of times.
         */
        if (page == ZERO_PAGE(0))
                return;

        this_cpu = get_cpu();

        mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
                int dirty = test_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        int dirty_cpu = dcache_dirty_cpu(page);

                        if (dirty_cpu == this_cpu)
                                goto out;
                        smp_flush_dcache_page_impl(page, dirty_cpu);
                }
                set_dcache_dirty(page, this_cpu);
        } else {
                /* We could delay the flush for the !page_mapping
                 * case too.  But that case is for exec env/arg
                 * pages and those are %99 certainly going to get
                 * faulted into the tlb (and thus flushed) anyways.
                 */
                flush_dcache_page_impl(page);
        }

out:
        put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
        /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
        if (tlb_type == spitfire) {
                unsigned long kaddr;

                for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
                        __flush_icache_page(__get_phys(kaddr));
        }
}

void show_mem(void)
{
        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6ldkB\n",
               nr_swap_pages << (PAGE_SHIFT-10));
        printk("%ld pages of RAM\n", num_physpages);
        printk("%d free pages\n", nr_free_pages());
}

void mmu_info(struct seq_file *m)
{
        if (tlb_type == cheetah)
                seq_printf(m, "MMU Type\t: Cheetah\n");
        else if (tlb_type == cheetah_plus)
                seq_printf(m, "MMU Type\t: Cheetah+\n");
        else if (tlb_type == spitfire)
                seq_printf(m, "MMU Type\t: Spitfire\n");
        else if (tlb_type == hypervisor)
                seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
        else
                seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
        seq_printf(m, "DCPageFlushes\t: %d\n",
                   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
        seq_printf(m, "DCPageFlushesXC\t: %d\n",
                   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
        unsigned long virt;
        unsigned long size;
        unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
        return (vaddr >= LOW_OBP_ADDRESS &&
                vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
        const struct linux_prom_translation *x = a, *y = b;

        if (x->virt > y->virt)
                return 1;
        if (x->virt < y->virt)
                return -1;
        return 0;
}

/* Read OBP translations property into 'prom_trans[]'.  */
static void __init read_obp_translations(void)
{
        int n, node, ents, first, last, i;

        node = prom_finddevice("/virtual-memory");
        n = prom_getproplen(node, "translations");
        if (unlikely(n == 0 || n == -1)) {
                prom_printf("prom_mappings: Couldn't get size.\n");
                prom_halt();
        }
        if (unlikely(n > sizeof(prom_trans))) {
                prom_printf("prom_mappings: Size %d is too big.\n", n);
                prom_halt();
        }

        if ((n = prom_getproperty(node, "translations",
                                  (char *)&prom_trans[0],
                                  sizeof(prom_trans))) == -1) {
                prom_printf("prom_mappings: Couldn't get property.\n");
                prom_halt();
        }

        n = n / sizeof(struct linux_prom_translation);

        ents = n;

        sort(prom_trans, ents, sizeof(struct linux_prom_translation),
             cmp_ptrans, NULL);

        /* Now kick out all the non-OBP entries.  */
        for (i = 0; i < ents; i++) {
                if (in_obp_range(prom_trans[i].virt))
                        break;
        }
        first = i;
        for (; i < ents; i++) {
                if (!in_obp_range(prom_trans[i].virt))
                        break;
        }
        last = i;

        for (i = 0; i < (last - first); i++) {
                struct linux_prom_translation *src = &prom_trans[i + first];
                struct linux_prom_translation *dest = &prom_trans[i];

                *dest = *src;
        }
        for (; i < ents; i++) {
                struct linux_prom_translation *dest = &prom_trans[i];
                dest->virt = dest->size = dest->data = 0x0UL;
        }

        prom_trans_ents = last - first;

        if (tlb_type == spitfire) {
                /* Clear diag TTE bits. */
                for (i = 0; i < prom_trans_ents; i++)
                        prom_trans[i].data &= ~0x0003fe0000000000UL;
        }
}

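/* sun4v fast hypercall: the call number goes in %o5, the arguments in
 * %o0-%o3, and "ta 0x80" (the fast-trap vector) enters the hypervisor.
 * On return %o0 holds the status; any non-zero value this early in
 * boot is fatal, so we just report it and halt.
 */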
static void __init hypervisor_tlb_lock(unsigned long vaddr,
                                       unsigned long pte,
                                       unsigned long mmu)
{
        register unsigned long func asm("%o5");
        register unsigned long arg0 asm("%o0");
        register unsigned long arg1 asm("%o1");
        register unsigned long arg2 asm("%o2");
        register unsigned long arg3 asm("%o3");

        func = HV_FAST_MMU_MAP_PERM_ADDR;
        arg0 = vaddr;
        arg1 = 0;
        arg2 = pte;
        arg3 = mmu;
        __asm__ __volatile__("ta        0x80"
                             : "=&r" (func), "=&r" (arg0),
                               "=&r" (arg1), "=&r" (arg2),
                               "=&r" (arg3)
                             : "0" (func), "1" (arg0), "2" (arg1),
                               "3" (arg2), "4" (arg3));
        if (arg0 != 0) {
                prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
                            "errors with %lx\n", vaddr, 0UL, pte, mmu, arg0);
                prom_halt();
        }
}

static unsigned long kern_large_tte(unsigned long paddr);

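/* Lock the kernel image into the TLB.  The image occupies one (or two,
 * if 'bigkernel') 4MB locked entries; on sun4v we ask the hypervisor
 * to install them permanently, on sun4u we program the highest locked
 * TLB entries through OBP.
 */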
static void __init remap_kernel(void)
{
        unsigned long phys_page, tte_vaddr, tte_data;
        int tlb_ent = sparc64_highest_locked_tlbent();

        tte_vaddr = (unsigned long) KERNBASE;
        phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
        tte_data = kern_large_tte(phys_page);

        kern_locked_tte_data = tte_data;

        /* Now lock us into the TLBs via Hypervisor or OBP. */
        if (tlb_type == hypervisor) {
                hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
                hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
                if (bigkernel) {
                        tte_vaddr += 0x400000;
                        tte_data += 0x400000;
                        hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
                        hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
                }
        } else {
                prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
                prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
                if (bigkernel) {
                        tlb_ent -= 1;
                        prom_dtlb_load(tlb_ent,
                                       tte_data + 0x400000,
                                       tte_vaddr + 0x400000);
                        prom_itlb_load(tlb_ent,
                                       tte_data + 0x400000,
                                       tte_vaddr + 0x400000);
                }
                sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
        }
        if (tlb_type == cheetah_plus) {
                sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
                                            CTX_CHEETAH_PLUS_NUC);
                sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
                sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
        }
}


static void __init inherit_prom_mappings(void)
{
        read_obp_translations();

        /* Now fixup OBP's idea about where we really are mapped. */
        prom_printf("Remapping the kernel... ");
        remap_kernel();
        prom_printf("done.\n");
}

void prom_world(int enter)
{
        if (!enter)
                set_fs((mm_segment_t) { get_thread_current_ds() });

        __asm__ __volatile__("flushw");
}

#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
        unsigned long va;

        if (tlb_type == spitfire) {
                int n = 0;

                for (va = start; va < end; va += 32) {
                        spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
                        if (++n >= 512)
                                break;
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                start = __pa(start);
                end = __pa(end);
                for (va = start; va < end; va += 32)
                        __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                             "membar #Sync"
                                             : /* no outputs */
                                             : "r" (va),
                                               "i" (ASI_DCACHE_INVALIDATE));
        }
}
#endif /* DCACHE_ALIASING_POSSIBLE */

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
        unsigned long ctx, new_ctx;
        unsigned long orig_pgsz_bits;
        unsigned long flags;
        int new_version;

        spin_lock_irqsave(&ctx_alloc_lock, flags);
        orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
        ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
        new_version = 0;
        if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
                        int i;
                        new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
                                CTX_FIRST_VERSION;
                        if (new_ctx == 1)
                                new_ctx = CTX_FIRST_VERSION;

                        /* Don't call memset, for 16 entries that's just
                         * plain silly...
                         */
                        mmu_context_bmap[0] = 3;
                        mmu_context_bmap[1] = 0;
                        mmu_context_bmap[2] = 0;
                        mmu_context_bmap[3] = 0;
                        for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
                                mmu_context_bmap[i + 0] = 0;
                                mmu_context_bmap[i + 1] = 0;
                                mmu_context_bmap[i + 2] = 0;
                                mmu_context_bmap[i + 3] = 0;
                        }
                        new_version = 1;
                        goto out;
                }
        }
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
        tlb_context_cache = new_ctx;
        mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
        spin_unlock_irqrestore(&ctx_alloc_lock, flags);

        if (unlikely(new_version))
                smp_new_mmu_context_version();
}

void sparc_ultra_dump_itlb(void)
{
        int slot;

        if (tlb_type == spitfire) {
                printk ("Contents of itlb: ");
                for (slot = 0; slot < 14; slot++) printk ("    ");
                printk ("%2x:%016lx,%016lx\n",
                        0,
                        spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
                for (slot = 1; slot < 64; slot+=3) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
                                slot+1,
                                spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
                                slot+2,
                                spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                printk ("Contents of itlb0:\n");
                for (slot = 0; slot < 16; slot+=2) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
                                slot+1,
                                cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
                }
                printk ("Contents of itlb2:\n");
                for (slot = 0; slot < 128; slot+=2) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
                                slot+1,
                                cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
                }
        }
}

void sparc_ultra_dump_dtlb(void)
{
        int slot;

        if (tlb_type == spitfire) {
                printk ("Contents of dtlb: ");
                for (slot = 0; slot < 14; slot++) printk ("    ");
                printk ("%2x:%016lx,%016lx\n", 0,
                        spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
                for (slot = 1; slot < 64; slot+=3) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
                                slot+1,
                                spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
                                slot+2,
                                spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                printk ("Contents of dtlb0:\n");
                for (slot = 0; slot < 16; slot+=2) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
                                slot+1,
                                cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
                }
                printk ("Contents of dtlb2:\n");
                for (slot = 0; slot < 512; slot+=2) {
                        printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                slot,
                                cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
                                slot+1,
                                cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
                }
                if (tlb_type == cheetah_plus) {
                        printk ("Contents of dtlb3:\n");
                        for (slot = 0; slot < 512; slot+=2) {
                                printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
                                        slot,
                                        cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
                                        slot+1,
                                        cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
                        }
                }
        }
}

extern unsigned long cmdline_memory_size;

/* Find a free area for the bootmem map, avoiding the kernel image
 * and the initial ramdisk.
 */
static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
                                               unsigned long end_pfn)
{
        unsigned long avoid_start, avoid_end, bootmap_size;
        int i;

        bootmap_size = ((end_pfn - start_pfn) + 7) / 8;
        bootmap_size = ALIGN(bootmap_size, sizeof(long));

        avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
        avoid_start = initrd_start;
        avoid_end = PAGE_ALIGN(initrd_end);
#endif

#ifdef CONFIG_DEBUG_BOOTMEM
        prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n",
                    kern_base, PAGE_ALIGN(kern_base + kern_size),
                    avoid_start, avoid_end);
#endif
        for (i = 0; i < pavail_ents; i++) {
                unsigned long start, end;

                start = pavail[i].phys_addr;
                end = start + pavail[i].reg_size;

                while (start < end) {
                        if (start >= kern_base &&
                            start < PAGE_ALIGN(kern_base + kern_size)) {
                                start = PAGE_ALIGN(kern_base + kern_size);
                                continue;
                        }
                        if (start >= avoid_start && start < avoid_end) {
                                start = avoid_end;
                                continue;
                        }

                        if ((end - start) < bootmap_size)
                                break;

                        if (start < kern_base &&
                            (start + bootmap_size) > kern_base) {
                                start = PAGE_ALIGN(kern_base + kern_size);
                                continue;
                        }

                        if (start < avoid_start &&
                            (start + bootmap_size) > avoid_start) {
                                start = avoid_end;
                                continue;
                        }

                        /* OK, it doesn't overlap anything, use it.  */
#ifdef CONFIG_DEBUG_BOOTMEM
                        prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n",
                                    start >> PAGE_SHIFT, start);
#endif
                        return start >> PAGE_SHIFT;
                }
        }

        prom_printf("Cannot find free area for bootmap, aborting.\n");
        prom_halt();
}

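/* Trim the OBP-reported banks against any mem= command line limit,
 * place the bootmem bitmap, register the free ranges, and reserve the
 * kernel image, the initrd and the bitmap itself.  Returns the highest
 * valid pfn.
 */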
static unsigned long __init bootmem_init(unsigned long *pages_avail,
                                         unsigned long phys_base)
{
        unsigned long bootmap_size, end_pfn;
        unsigned long end_of_phys_memory = 0UL;
        unsigned long bootmap_pfn, bytes_avail, size;
        int i;

#ifdef CONFIG_DEBUG_BOOTMEM
        prom_printf("bootmem_init: Scan pavail, ");
#endif

        bytes_avail = 0UL;
        for (i = 0; i < pavail_ents; i++) {
                end_of_phys_memory = pavail[i].phys_addr +
                        pavail[i].reg_size;
                bytes_avail += pavail[i].reg_size;
                if (cmdline_memory_size) {
                        if (bytes_avail > cmdline_memory_size) {
                                unsigned long slack = bytes_avail - cmdline_memory_size;

                                bytes_avail -= slack;
                                end_of_phys_memory -= slack;

                                pavail[i].reg_size -= slack;
                                if ((long)pavail[i].reg_size <= 0L) {
                                        pavail[i].phys_addr = 0xdeadbeefUL;
                                        pavail[i].reg_size = 0UL;
                                        pavail_ents = i;
                                } else {
                                        pavail[i+1].reg_size = 0UL;
                                        pavail[i+1].phys_addr = 0xdeadbeefUL;
                                        pavail_ents = i + 1;
                                }
                                break;
                        }
                }
        }

        *pages_avail = bytes_avail >> PAGE_SHIFT;

        end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
        /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
        if (sparc_ramdisk_image || sparc_ramdisk_image64) {
                unsigned long ramdisk_image = sparc_ramdisk_image ?
                        sparc_ramdisk_image : sparc_ramdisk_image64;
                if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
                        ramdisk_image -= KERNBASE;
                initrd_start = ramdisk_image + phys_base;
                initrd_end = initrd_start + sparc_ramdisk_size;
                if (initrd_end > end_of_phys_memory) {
                        printk(KERN_CRIT "initrd extends beyond end of memory "
                                         "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
                               initrd_end, end_of_phys_memory);
                        initrd_start = 0;
                        initrd_end = 0;
                }
        }
#endif
        /* Initialize the boot-time allocator. */
        max_pfn = max_low_pfn = end_pfn;
        min_low_pfn = (phys_base >> PAGE_SHIFT);

        bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);

#ifdef CONFIG_DEBUG_BOOTMEM
        prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
                    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
                                         min_low_pfn, end_pfn);

        /* Now register the available physical memory with the
         * allocator.
         */
        for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
                prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
                            i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
                free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                size = initrd_end - initrd_start;

                /* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
                prom_printf("reserve_bootmem(initrd): base[%lx] size[%lx]\n",
                        initrd_start, size);
#endif
                reserve_bootmem(initrd_start, size);
                *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

                initrd_start += PAGE_OFFSET;
                initrd_end += PAGE_OFFSET;
        }
#endif
        /* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
        prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
        reserve_bootmem(kern_base, kern_size);
        *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

        /* Reserve the bootmem map.   We do not account for it
         * in pages_avail because we will release that memory
         * in free_all_bootmem.
         */
        size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
        prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
                    (bootmap_pfn << PAGE_SHIFT), size);
#endif
        reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
        *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

        for (i = 0; i < pavail_ents; i++) {
                unsigned long start_pfn, end_pfn;

                start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
                end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
#ifdef CONFIG_DEBUG_BOOTMEM
                prom_printf("memory_present(0, %lx, %lx)\n",
                            start_pfn, end_pfn);
#endif
                memory_present(0, start_pfn, end_pfn);
        }

        sparse_init();

        return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

#ifdef CONFIG_DEBUG_PAGEALLOC
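/* Map [pstart, pend) in the kernel linear area with real page tables
 * at base page granularity (rather than the locked large TTEs), so
 * that kernel_map_pages() below can later unmap individual pages for
 * page allocation debugging.  Returns the number of bytes of page
 * table memory allocated.
 */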
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
        unsigned long vstart = PAGE_OFFSET + pstart;
        unsigned long vend = PAGE_OFFSET + pend;
        unsigned long alloc_bytes = 0UL;

        if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
                prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
                            vstart, vend);
                prom_halt();
        }

        while (vstart < vend) {
                unsigned long this_end, paddr = __pa(vstart);
                pgd_t *pgd = pgd_offset_k(vstart);
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pud = pud_offset(pgd, vstart);
                if (pud_none(*pud)) {
                        pmd_t *new;

                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
                        alloc_bytes += PAGE_SIZE;
                        pud_populate(&init_mm, pud, new);
                }

                pmd = pmd_offset(pud, vstart);
                if (!pmd_present(*pmd)) {
                        pte_t *new;

                        new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
                        alloc_bytes += PAGE_SIZE;
                        pmd_populate_kernel(&init_mm, pmd, new);
                }

                pte = pte_offset_kernel(pmd, vstart);
                this_end = (vstart + PMD_SIZE) & PMD_MASK;
                if (this_end > vend)
                        this_end = vend;

                while (vstart < this_end) {
                        pte_val(*pte) = (paddr | pgprot_val(prot));

                        vstart += PAGE_SIZE;
                        paddr += PAGE_SIZE;
                        pte++;
                }
        }

        return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

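/* Set a bit in kpte_linear_bitmap for every fully populated, naturally
 * aligned 256MB chunk of this physical range.  Partial or misaligned
 * pieces are skipped and remain covered by 4MB mappings via
 * kern_linear_pte_xor[0], per the bitmap comment at the top of this
 * file.
 */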
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
        const unsigned long shift_256MB = 28;
        const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
        const unsigned long size_256MB = (1UL << shift_256MB);

        while (start < end) {
                long remains;

                remains = end - start;
                if (remains < size_256MB)
                        break;

                if (start & mask_256MB) {
                        start = (start + size_256MB) & ~mask_256MB;
                        continue;
                }

                while (remains >= size_256MB) {
                        unsigned long index = start >> shift_256MB;

                        __set_bit(index, kpte_linear_bitmap);

                        start += size_256MB;
                        remains -= size_256MB;
                }
        }
}

static void __init kernel_physical_mapping_init(void)
{
        unsigned long i;
#ifdef CONFIG_DEBUG_PAGEALLOC
        unsigned long mem_alloced = 0UL;
#endif

        read_obp_memory("reg", &pall[0], &pall_ents);

        for (i = 0; i < pall_ents; i++) {
                unsigned long phys_start, phys_end;

                phys_start = pall[i].phys_addr;
                phys_end = phys_start + pall[i].reg_size;

                mark_kpte_bitmap(phys_start, phys_end);

#ifdef CONFIG_DEBUG_PAGEALLOC
                mem_alloced += kernel_map_range(phys_start, phys_end,
                                                PAGE_KERNEL);
#endif
        }

#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("Allocated %ld bytes for kernel page tables.\n",
               mem_alloced);

        kvmap_linear_patch[0] = 0x01000000; /* nop */
        flushi(&kvmap_linear_patch[0]);

        __flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
        unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

        kernel_map_range(phys_start, phys_end,
                         (enable ? PAGE_KERNEL : __pgprot(0)));

        flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
                               PAGE_OFFSET + phys_end);

        /* we should perform an IPI and flush all tlbs,
         * but that can deadlock->flush only current cpu.
         */
        __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
                                 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
        int i;

        for (i = 0; i < pavail_ents; i++) {
                if (pavail[i].reg_size >= size)
                        return pavail[i].phys_addr;
        }

        return ~0UL;
}

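/* Rewrite the TSB access instructions in the trap handlers so that
 * they use physical addressing (and the sun4v variants where the
 * hypervisor is present), then flush each patched instruction out of
 * the I-cache.
 */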
static void __init tsb_phys_patch(void)
{
        struct tsb_ldquad_phys_patch_entry *pquad;
        struct tsb_phys_patch_entry *p;

        pquad = &__tsb_ldquad_phys_patch;
        while (pquad < &__tsb_ldquad_phys_patch_end) {
                unsigned long addr = pquad->addr;

                if (tlb_type == hypervisor)
                        *(unsigned int *) addr = pquad->sun4v_insn;
                else
                        *(unsigned int *) addr = pquad->sun4u_insn;
                wmb();
                __asm__ __volatile__("flush     %0"
                                     : /* no outputs */
                                     : "r" (addr));

                pquad++;
        }

        p = &__tsb_phys_patch;
        while (p < &__tsb_phys_patch_end) {
                unsigned long addr = p->addr;

                *(unsigned int *) addr = p->insn;
                wmb();
                __asm__ __volatile__("flush     %0"
                                     : /* no outputs */
                                     : "r" (addr));

                p++;
        }
}

/* Don't mark as init, we give this to the Hypervisor.  */
static struct hv_tsb_descr ktsb_descr[2];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

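/* Fill in the two hypervisor TSB descriptors: one for the base
 * PAGE_SIZE kernel TSB and one shared by the 4MB and 256MB linear
 * mapping TSB.  They are handed to the hypervisor by
 * sun4v_ktsb_register() below.
 */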
static void __init sun4v_ktsb_init(void)
{
        unsigned long ktsb_pa;

        /* First KTSB for PAGE_SIZE mappings.  */
        ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

        switch (PAGE_SIZE) {
        case 8 * 1024:
        default:
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
                break;

        case 64 * 1024:
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
                break;

        case 512 * 1024:
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
                break;

        case 4 * 1024 * 1024:
                ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
                ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
                break;
        }

        ktsb_descr[0].assoc = 1;
        ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
        ktsb_descr[0].ctx_idx = 0;
        ktsb_descr[0].tsb_base = ktsb_pa;
        ktsb_descr[0].resv = 0;

        /* Second KTSB for 4MB/256MB mappings.  */
        ktsb_pa = (kern_base +
                   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

        ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
        ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
                                   HV_PGSZ_MASK_256MB);
        ktsb_descr[1].assoc = 1;
        ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
        ktsb_descr[1].ctx_idx = 0;
        ktsb_descr[1].tsb_base = ktsb_pa;
        ktsb_descr[1].resv = 0;
}

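/* Register both kernel TSB descriptors with the hypervisor for
 * context zero: arg0 is the descriptor count (2), arg1 the physical
 * address of the ktsb_descr[] array.
 */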
void __cpuinit sun4v_ktsb_register(void)
{
        register unsigned long func asm("%o5");
        register unsigned long arg0 asm("%o0");
        register unsigned long arg1 asm("%o1");
        unsigned long pa;

        pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

        func = HV_FAST_MMU_TSB_CTX0;
        arg0 = 2;
        arg1 = pa;
        __asm__ __volatile__("ta        %6"
                             : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
                             : "0" (func), "1" (arg0), "2" (arg1),
                               "i" (HV_FAST_TRAP));
}

/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
        unsigned long end_pfn, pages_avail, shift, phys_base;
        unsigned long real_end, i;

        kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
        kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

        /* Invalidate both kernel TSBs.  */
        memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
        memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));

        if (tlb_type == hypervisor)
                sun4v_pgprot_init();
        else
                sun4u_pgprot_init();

        if (tlb_type == cheetah_plus ||
            tlb_type == hypervisor)
                tsb_phys_patch();

        if (tlb_type == hypervisor) {
                sun4v_patch_tlb_handlers();
                sun4v_ktsb_init();
        }

        /* Find available physical memory... */
        read_obp_memory("available", &pavail[0], &pavail_ents);

        phys_base = 0xffffffffffffffffUL;
        for (i = 0; i < pavail_ents; i++)
                phys_base = min(phys_base, pavail[i].phys_addr);

        set_bit(0, mmu_context_bmap);

        shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

        real_end = (unsigned long)_end;
        if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
                bigkernel = 1;
        if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
                prom_printf("paging_init: Kernel > 8MB, too large.\n");
                prom_halt();
        }

        /* Set kernel pgd to upper alias so physical page computations
         * work.
         */
        init_mm.pgd += ((shift) / (sizeof(pgd_t)));

        memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

        /* Now can init the kernel/bad page tables. */
        pud_set(pud_offset(&swapper_pg_dir[0], 0),
                swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

        inherit_prom_mappings();

        /* Ok, we can use our TLB miss and window trap handlers safely.  */
        setup_tba();

        __flush_tlb_all();

        if (tlb_type == hypervisor)
                sun4v_ktsb_register();

        /* Setup bootmem... */
        pages_avail = 0;
        last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);

        max_mapnr = last_valid_pfn;

        kernel_physical_mapping_init();

        {
                unsigned long zones_size[MAX_NR_ZONES];
                unsigned long zholes_size[MAX_NR_ZONES];
                int znum;

                for (znum = 0; znum < MAX_NR_ZONES; znum++)
                        zones_size[znum] = zholes_size[znum] = 0;

                zones_size[ZONE_DMA] = end_pfn;
                zholes_size[ZONE_DMA] = end_pfn - pages_avail;

                free_area_init_node(0, &contig_page_data, zones_size,
                                    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
                                    zholes_size);
        }

        device_scan();
}

static void __init taint_real_pages(void)
{
        int i;

        read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

        /* Find changes discovered in the physmem available rescan and
         * reserve the lost portions in the bootmem maps.
         */
        for (i = 0; i < pavail_ents; i++) {
                unsigned long old_start, old_end;

                old_start = pavail[i].phys_addr;
                old_end = old_start +
                        pavail[i].reg_size;
                while (old_start < old_end) {
                        int n;

                        for (n = 0; n < pavail_rescan_ents; n++) {
                                unsigned long new_start, new_end;

                                new_start = pavail_rescan[n].phys_addr;
                                new_end = new_start +
                                        pavail_rescan[n].reg_size;

                                if (new_start <= old_start &&
                                    new_end >= (old_start + PAGE_SIZE)) {
                                        set_bit(old_start >> 22,
                                                sparc64_valid_addr_bitmap);
                                        goto do_next_page;
                                }
                        }
                        reserve_bootmem(old_start, PAGE_SIZE);

                do_next_page:
                        old_start += PAGE_SIZE;
                }
        }
}

void __init mem_init(void)
{
        unsigned long codepages, datapages, initpages;
        unsigned long addr, last;
        int i;

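        /* One valid-address bit covers 4MB (1 << 22) of physical memory:
         * shifting the pfn by (22 - PAGE_SHIFT) counts 4MB chunks, and
         * the further shift by 6 converts bits into 64-bit words, so
         * 'i << 3' below is the bitmap size in bytes.
         */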
1382         i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
1383         i += 1;
1384         sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
1385         if (sparc64_valid_addr_bitmap == NULL) {
1386                 prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
1387                 prom_halt();
1388         }
1389         memset(sparc64_valid_addr_bitmap, 0, i << 3);
1390
	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page and mark it reserved, so that the page
	 * count is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("mem_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align
	 * for >8k page sizes.
	 */
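	/* Editorial note on the loop below: the kernel may be loaded at
	 * a physical address (kern_base) other than the one backing
	 * KERNBASE, so each init-section virtual address is rebased into
	 * the linear mapping (addr + __va(kern_base) - KERNBASE) before
	 * virt_to_page() is applied.  The 0xcc memset is simply a poison
	 * pattern so that any stale use of freed init code or data fails
	 * visibly rather than silently.
	 */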
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}
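
/* Editorial note: the protection_map index above is the low four VM_*
 * flag bits of a vma, following the generic mm convention:
 * bit 0 = VM_READ, bit 1 = VM_WRITE, bit 2 = VM_EXEC,
 * bit 3 = VM_SHARED.  So, for example, index 0x3 (private read+write)
 * gets page_copy with the exec bit stripped -- a copy-on-write,
 * non-executable protection -- while index 0xf (shared
 * read+write+exec) gets the full page_shared protection.
 */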

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4U | _PAGE_VALID |
			       _PAGE_CACHE_4U | _PAGE_P_4U |
			       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
			       _PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot(_PAGE_PRESENT_4U | _PAGE_VALID |
				      _PAGE_CACHE_4U | _PAGE_P_4U |
				      __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				      _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000;
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);
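
	/* Editorial note: 0xfffff80000000000 is PAGE_OFFSET, so after
	 * this XOR the TLB-miss fast path can form a linear-mapping TTE
	 * as "vaddr ^ kern_linear_pte_xor[n]": for a 4MB-aligned vaddr
	 * the XOR strips PAGE_OFFSET (leaving the physical address) and
	 * at the same time toggles in the valid/size/cache/permission
	 * bits folded into the mask.  Worked example:
	 * 0xfffff80000400000 ^ 0xfffff80000000000 == 0x400000, with the
	 * mask's low bits then appearing as PTE attribute bits.
	 */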

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			   __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot(_PAGE_PRESENT_4V | _PAGE_VALID |
			       _PAGE_CACHE_4V | _PAGE_P_4V |
			       __ACCESS_BITS_4V | __DIRTY_BITS_4V |
			       _PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000;
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000;
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}

pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}
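
/* Editorial usage sketch (hypothetical values): a caller building a
 * PTE for a 512K uncacheable device mapping might do
 *
 *	pte_t pte = mk_pte_io(paddr, PAGE_KERNEL, io_space, 512 * 1024);
 *
 * where "paddr" is the device's physical address, "io_space" is an
 * I/O space identifier that this helper places at bit 32 of the PTE,
 * and the size argument selects the matching _PAGE_SZ* encoding via
 * pte_sz_bits() above (unknown sizes fall back to the 8K encoding).
 */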

static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}
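
/* Editorial note: the sun4u variant above sets _PAGE_L_4U to lock the
 * kernel's 4MB TTE into the TLB; the sun4v TTE format has no lock bit,
 * so (presumably) permanence is a property of how the mapping is
 * installed through the hypervisor rather than of the TTE itself.
 */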

/*
 * Translate a PROM mapping we captured at boot time into a physical
 * address.  The second parameter is only set from prom_callback()
 * invocations.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	unsigned long mask;
	int i;

	mask = _PAGE_PADDR_4U;
	if (tlb_type == hypervisor)
		mask = _PAGE_PADDR_4V;

	for (i = 0; i < prom_trans_ents; i++) {
		struct linux_prom_translation *p = &prom_trans[i];

		if (promva >= p->virt &&
		    promva < (p->virt + p->size)) {
			unsigned long base = p->data & mask;

			if (error)
				*error = 0;
			return base + (promva & (8192 - 1));
		}
	}
	if (error)
		*error = 1;
	return 0UL;
}

/* XXX We should kill off this ugly thing at some point. XXX */
unsigned long sun4u_get_pte(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long mask = _PAGE_PADDR_4U;

	if (tlb_type == hypervisor)
		mask = _PAGE_PADDR_4V;

	if (addr >= PAGE_OFFSET)
		return addr & mask;

	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
		return prom_virt_to_phys(addr, NULL);

	pgdp = pgd_offset_k(addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset_kernel(pmdp, addr);

	return pte_val(*ptep) & mask;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

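	/* Editorial note: the SPARC V9 wrpr instruction XORs its two
	 * source operands, so "wrpr %0, %1, %%pstate" below writes
	 * pstate ^ PSTATE_IE, disabling interrupts, and the trailing
	 * "wrpr %0, 0, %%pstate" restores the saved value unchanged.
	 * The leading flushw spills register windows first, apparently
	 * so that window spill traps happen before translations are
	 * dropped.
	 */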
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *	 cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *	 cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}