}
static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+ unsigned long page_size_mask)
{
unsigned long pages = 0;
continue;
}
- if (cpu_has_pse) {
+ if (page_size_mask & (1<<PG_LEVEL_2M)) {
pages++;
set_pte((pte_t *)pmd,
pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
}
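
The 2M decision in phys_pmd_init() now comes from a caller-supplied page_size_mask rather than the global cpu_has_pse test, so different ranges can be mapped with different page sizes. A minimal standalone sketch of the bit encoding; the PG_LEVEL_* constants are redefined here only so the example compiles outside the kernel (the real enum lives in the x86 pgtable headers):

    /* Standalone model of the page_size_mask encoding -- not kernel code. */
    #include <stdio.h>

    enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

    int main(void)
    {
            unsigned long page_size_mask = 1 << PG_LEVEL_2M; /* caller asks for 2M pages */

            /* Same test phys_pmd_init() now performs: */
            if (page_size_mask & (1 << PG_LEVEL_2M))
                    printf("map this range with 2M pages\n");
            else
                    printf("fall back to 4K pages\n");
            return 0;
    }
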
static unsigned long __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
+ unsigned long page_size_mask)
{
pmd_t *pmd = pmd_offset(pud, 0);
unsigned long last_map_addr;
spin_lock(&init_mm.page_table_lock);
- last_map_addr = phys_pmd_init(pmd, address, end);
+ last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
spin_unlock(&init_mm.page_table_lock);
__flush_tlb_all();
return last_map_addr;
}
static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+ unsigned long page_size_mask)
{
unsigned long pages = 0;
unsigned long last_map_addr = end;
if (pud_val(*pud)) {
if (!pud_large(*pud))
- last_map_addr = phys_pmd_update(pud, addr, end);
+ last_map_addr = phys_pmd_update(pud, addr, end,
+ page_size_mask);
continue;
}
- if (direct_gbpages) {
+ if (page_size_mask & (1<<PG_LEVEL_1G)) {
pages++;
set_pte((pte_t *)pud,
pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
pmd = alloc_low_page(&pmd_phys);
spin_lock(&init_mm.page_table_lock);
- last_map_addr = phys_pmd_init(pmd, addr, end);
+ last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
unmap_low_page(pmd);
pud_populate(&init_mm, pud, __va(pmd_phys));
spin_unlock(&init_mm.page_table_lock);
}
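
phys_pud_init() gets the same treatment one level up: the direct_gbpages global becomes a PG_LEVEL_1G bit in the same mask, so a single value can describe both large-page sizes at once. For scale, a bit of plain arithmetic on what each level covers (standalone, nothing kernel-specific):

    #include <stdio.h>

    int main(void)
    {
            /* Address space covered by one entry at each level on x86-64. */
            unsigned long pmd_span = 1UL << 21;   /* 2 MiB per PMD entry */
            unsigned long pud_span = 1UL << 30;   /* 1 GiB per PUD entry */

            printf("one 1G PUD entry replaces %lu PMD entries\n",
                   pud_span / pmd_span);          /* prints 512 */
            return 0;
    }
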
static unsigned long __meminit
-phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end)
+phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
+ unsigned long page_size_mask)
{
pud_t *pud;
pud = (pud_t *)pgd_page_vaddr(*pgd);
- return phys_pud_init(pud, addr, end);
+ return phys_pud_init(pud, addr, end, page_size_mask);
}
static void __init find_early_table_space(unsigned long end)
}
#endif
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+ unsigned long end,
+ unsigned long page_size_mask)
{
- unsigned long next, last_map_addr = end;
- unsigned long start_phys = start, end_phys = end;
- printk(KERN_INFO "init_memory_mapping\n");
-
- /*
- * Find space for the kernel direct mapping tables.
- *
- * Later we should allocate these tables in the local node of the
- * memory mapped. Unfortunately this is done currently before the
- * nodes are discovered.
- */
- if (!after_bootmem) {
- init_gbpages();
- find_early_table_space(end);
- }
+ unsigned long next, last_map_addr = end;
start = (unsigned long)__va(start);
end = (unsigned long)__va(end);
next = end;
if (pgd_val(*pgd)) {
- last_map_addr = phys_pud_update(pgd, __pa(start), __pa(end));
+ last_map_addr = phys_pud_update(pgd, __pa(start),
+ __pa(end), page_size_mask);
continue;
}
else
pud = alloc_low_page(&pud_phys);
- last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
+ last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
+ page_size_mask);
unmap_low_page(pud);
pgd_populate(&init_mm, pgd_offset_k(start),
__va(pud_phys));
}
+ return last_map_addr;
+}
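
With the loop body factored out, kernel_physical_mapping_init() is pure mechanism: given a physical range and a mask, it builds the page tables, while the policy of which sizes are safe stays with the caller. A stubbed sketch of that split; the function and variable names here are hypothetical stand-ins for the real feature checks and walk:

    #include <stdio.h>

    enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

    /* Mechanism: would walk [start, end) using only the sizes in the mask. */
    static unsigned long mapping_init(unsigned long start, unsigned long end,
                                      unsigned long page_size_mask)
    {
            printf("mapping %lx-%lx with mask %lx\n", start, end, page_size_mask);
            return end; /* plays the role of last_map_addr */
    }

    int main(void)
    {
            unsigned long mask = 0;
            int pse = 1, gbpages = 0; /* stand-ins for the CPU feature tests */

            /* Policy: decide the usable sizes once, up front. */
            if (gbpages)
                    mask |= 1 << PG_LEVEL_1G;
            if (pse)
                    mask |= 1 << PG_LEVEL_2M;

            mapping_init(0, 1UL << 30, mask);
            return 0;
    }
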
+/*
+ * Set up the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+ unsigned long end)
+{
+ unsigned long last_map_addr;
+ unsigned long page_size_mask = 0;
+
+ printk(KERN_INFO "init_memory_mapping\n");
+
+ /*
+ * Find space for the kernel direct mapping tables.
+ *
+ * Later we should allocate these tables in the local node of the
+ * memory mapped. Unfortunately this is done currently before the
+ * nodes are discovered.
+ */
+ if (!after_bootmem) {
+ init_gbpages();
+ find_early_table_space(end);
+ }
+
+ if (direct_gbpages)
+ page_size_mask |= 1 << PG_LEVEL_1G;
+ if (cpu_has_pse)
+ page_size_mask |= 1 << PG_LEVEL_2M;
+
+ last_map_addr = kernel_physical_mapping_init(start, end,
+ page_size_mask);
+
if (!after_bootmem)
mmu_cr4_features = read_cr4();
__flush_tlb_all();
- if (!after_bootmem)
+ if (!after_bootmem && table_end > table_start)
reserve_early(table_start << PAGE_SHIFT,
table_end << PAGE_SHIFT, "PGTABLE");
+ printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
+ last_map_addr, end);
+
if (!after_bootmem)
- early_memtest(start_phys, end_phys);
+ early_memtest(start, end);
return last_map_addr >> PAGE_SHIFT;
}
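
Two small behavioral changes ride along: the PGTABLE reservation is now skipped when the walk allocated no page-table pages (table_end == table_start), and the final mapped address is logged. The start_phys/end_phys copies also disappear, since start and end are no longer rewritten to virtual addresses before early_memtest(). A toy illustration of the guard; reserve_early() is stubbed here and 12 stands in for PAGE_SHIFT:

    #include <stdio.h>

    static void reserve_early(unsigned long start, unsigned long end,
                              const char *name)
    {
            printf("reserving [%lx, %lx) for %s\n", start, end, name);
    }

    int main(void)
    {
            unsigned long table_start = 0x100, table_end = 0x100;

            /* Guard mirrors the patch: an empty allocation means there is
             * nothing to reserve, so don't emit a zero-length range. */
            if (table_end > table_start)
                    reserve_early(table_start << 12, table_end << 12, "PGTABLE");
            else
                    printf("no page-table pages allocated, skipping reserve\n");
            return 0;
    }
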