#define dcache_dirty_cpu(page) \
(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
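+/* Record in page->flags which CPU last dirtied this page in its D-cache. */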
-static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
+static inline void set_dcache_dirty(struct page *page, int this_cpu)
{
unsigned long mask = this_cpu;
unsigned long non_cpu_bits;
: "g1", "g7");
}
-static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
+static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
unsigned long mask = (1UL << PG_dcache_dirty);
prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
initrd_start, initrd_end);
#endif
- reserve_bootmem(initrd_start, size);
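+ /* The new third argument selects reservation flags; BOOTMEM_DEFAULT keeps the old reserve_bootmem() behaviour (overlapping reservations are not treated as an error). */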
+ reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
initrd_start += PAGE_OFFSET;
initrd_end += PAGE_OFFSET;
#ifdef CONFIG_DEBUG_BOOTMEM
prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
- reserve_bootmem(kern_base, kern_size);
+ reserve_bootmem(kern_base, kern_size, BOOTMEM_DEFAULT);
*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
/* Add back in the initmem pages. */
prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
(bootmap_pfn << PAGE_SHIFT), size);
#endif
- reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
+ reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
for (i = 0; i < pavail_ents; i++) {
unsigned long start_pfn, end_pfn;
}
}
-static void __init kernel_physical_mapping_init(void)
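+/* Mark the kpte bitmap for every physical memory range recorded in pall[]. */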
+static void __init init_kpte_bitmap(void)
{
unsigned long i;
-#ifdef CONFIG_DEBUG_PAGEALLOC
- unsigned long mem_alloced = 0UL;
-#endif
-
- read_obp_memory("reg", &pall[0], &pall_ents);
for (i = 0; i < pall_ents; i++) {
unsigned long phys_start, phys_end;
phys_start = pall[i].phys_addr;
phys_end = phys_start + pall[i].reg_size;
mark_kpte_bitmap(phys_start, phys_end);
+ }
+}
+static void __init kernel_physical_mapping_init(void)
+{
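+ /* With CONFIG_DEBUG_PAGEALLOC, cover each physical memory range in pall[] with kernel page table mappings via kernel_map_range(). */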
#ifdef CONFIG_DEBUG_PAGEALLOC
+ unsigned long i, mem_alloced = 0UL;
+
+ for (i = 0; i < pall_ents; i++) {
+ unsigned long phys_start, phys_end;
+
+ phys_start = pall[i].phys_addr;
+ phys_end = phys_start + pall[i].reg_size;
+
mem_alloced += kernel_map_range(phys_start, phys_end,
PAGE_KERNEL);
-#endif
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
printk("Allocated %ld bytes for kernel page tables.\n",
mem_alloced);
static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
+/* Dummy function */
+void __init setup_per_cpu_areas(void)
+{
+}
+
void __init paging_init(void)
{
unsigned long end_pfn, pages_avail, shift, phys_base;
inherit_prom_mappings();
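+ /* Read the OBP "reg" memory list into pall[], then mark the kpte bitmap for those ranges. */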
+ read_obp_memory("reg", &pall[0], &pall_ents);
+
+ init_kpte_bitmap();
+
/* Ok, we can use our TLB miss and window trap handlers safely. */
setup_tba();
goto do_next_page;
}
}
- reserve_bootmem(old_start, PAGE_SIZE);
+ reserve_bootmem(old_start, PAGE_SIZE, BOOTMEM_DEFAULT);
do_next_page:
old_start += PAGE_SIZE;
"wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
- if (tlb_type == spitfire) {
+ if (tlb_type == hypervisor) {
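+ /* sun4v: ask the hypervisor to demap all TLB entries. */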
+ sun4v_mmu_demap_all();
+ } else if (tlb_type == spitfire) {
for (i = 0; i < 64; i++) {
/* Spitfire Errata #32 workaround */
/* NOTE: Always runs on spitfire, so no