x86: introduce page_size_mask for 64bit
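
The change itself is mechanical: the CPU-feature checks (cpu_has_pse, direct_gbpages) are hoisted out of the per-level helpers. init_memory_mapping() now builds a page_size_mask once and threads it down through kernel_physical_mapping_init(), phys_pud_init() and phys_pmd_init(), which only test bits indexed by the PG_LEVEL_* enum. A minimal standalone sketch of that encoding follows; it is illustrative only (the enum ordering mirrors <asm/pgtable.h> of this kernel, the helper names are made up for the example):

/* Sketch: encode "which large page sizes may be used" as a bitmap. */
#include <stdio.h>

enum pg_level {                         /* same ordering as the kernel's enum */
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
        PG_LEVEL_1G,
};

static unsigned long build_page_size_mask(int has_pse, int has_gbpages)
{
        unsigned long mask = 0;

        if (has_pse)                    /* CPU can map 2MB pages at the PMD level */
                mask |= 1UL << PG_LEVEL_2M;
        if (has_gbpages)                /* CPU can map 1GB pages at the PUD level */
                mask |= 1UL << PG_LEVEL_1G;
        return mask;
}

int main(void)
{
        unsigned long mask = build_page_size_mask(1, 0);

        /* A mapping helper now only tests a bit, as phys_pmd_init() does: */
        if (mask & (1UL << PG_LEVEL_2M))
                printf("would use a 2MB mapping here\n");
        if (!(mask & (1UL << PG_LEVEL_1G)))
                printf("no 1GB pages, fall back to PMD-level mappings\n");
        return 0;
}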
[linux-2.6-omap-h63xx.git] / arch/x86/mm/init_64.c
index b10b7f17ea58d414576ded22b3c1b713fb06d6b6..7227a0a3f3a0cb70a52decb2715f681d1c330d3c 100644 (file)
@@ -201,6 +201,46 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval)
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
 }
 
+/*
+ * Create large page table mappings for a range of physical addresses.
+ */
+static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+                                               pgprot_t prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
+       for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
+               pgd = pgd_offset_k((unsigned long)__va(phys));
+               if (pgd_none(*pgd)) {
+                       pud = (pud_t *) spp_getpage();
+                       set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
+                                               _PAGE_USER));
+               }
+               pud = pud_offset(pgd, (unsigned long)__va(phys));
+               if (pud_none(*pud)) {
+                       pmd = (pmd_t *) spp_getpage();
+                       set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
+                                               _PAGE_USER));
+               }
+               pmd = pmd_offset(pud, phys);
+               BUG_ON(!pmd_none(*pmd));
+               set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
+       }
+}
+
+void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
+{
+       __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
+}
+
+void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
+{
+       __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
+}
+
 /*
  * The head.S code sets up the kernel high mapping:
  *
@@ -300,7 +340,8 @@ phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+                        unsigned long page_size_mask)
 {
        unsigned long pages = 0;
 
@@ -325,7 +366,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
                        continue;
                }
 
-               if (cpu_has_pse) {
+               if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        set_pte((pte_t *)pmd,
                                pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
@@ -343,20 +384,22 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
+                        unsigned long page_size_mask)
 {
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long last_map_addr;
 
        spin_lock(&init_mm.page_table_lock);
-       last_map_addr = phys_pmd_init(pmd, address, end);
+       last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
        spin_unlock(&init_mm.page_table_lock);
        __flush_tlb_all();
        return last_map_addr;
 }
 
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
+phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+                        unsigned long page_size_mask)
 {
        unsigned long pages = 0;
        unsigned long last_map_addr = end;
@@ -378,11 +421,12 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 
                if (pud_val(*pud)) {
                        if (!pud_large(*pud))
-                               last_map_addr = phys_pmd_update(pud, addr, end);
+                               last_map_addr = phys_pmd_update(pud, addr, end,
+                                                        page_size_mask);
                        continue;
                }
 
-               if (direct_gbpages) {
+               if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        set_pte((pte_t *)pud,
                                pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
@@ -393,7 +437,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
                pmd = alloc_low_page(&pmd_phys);
 
                spin_lock(&init_mm.page_table_lock);
-               last_map_addr = phys_pmd_init(pmd, addr, end);
+               last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
                unmap_low_page(pmd);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
@@ -406,13 +450,14 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 }
 
 static unsigned long __meminit
-phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end)
+phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
+                unsigned long page_size_mask)
 {
        pud_t *pud;
 
        pud = (pud_t *)pgd_page_vaddr(*pgd);
 
-       return phys_pud_init(pud, addr, end);
+       return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
 static void __init find_early_table_space(unsigned long end)
@@ -568,29 +613,12 @@ static void __init early_memtest(unsigned long start, unsigned long end)
 }
 #endif
 
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned long end)
+static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+                                               unsigned long end,
+                                               unsigned long page_size_mask)
 {
-       unsigned long next, last_map_addr = end;
-       unsigned long start_phys = start, end_phys = end;
-
-       printk(KERN_INFO "init_memory_mapping\n");
 
-       /*
-        * Find space for the kernel direct mapping tables.
-        *
-        * Later we should allocate these tables in the local node of the
-        * memory mapped. Unfortunately this is done currently before the
-        * nodes are discovered.
-        */
-       if (!after_bootmem) {
-               init_gbpages();
-               find_early_table_space(end);
-       }
+       unsigned long next, last_map_addr = end;
 
        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
@@ -605,7 +633,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned lon
                        next = end;
 
                if (pgd_val(*pgd)) {
-                       last_map_addr = phys_pud_update(pgd, __pa(start), __pa(end));
+                       last_map_addr = phys_pud_update(pgd, __pa(start),
+                                                __pa(end), page_size_mask);
                        continue;
                }
 
@@ -614,23 +643,61 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, unsigned lon
                else
                        pud = alloc_low_page(&pud_phys);
 
-               last_map_addr = phys_pud_init(pud, __pa(start), __pa(next));
+               last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
+                                                page_size_mask);
                unmap_low_page(pud);
-               if (!after_bootmem)
-                       pgd_populate(&init_mm, pgd_offset_k(start),
-                                    __va(pud_phys));
+               pgd_populate(&init_mm, pgd_offset_k(start),
+                            __va(pud_phys));
        }
 
+       return last_map_addr;
+}
+/*
+ * Setup the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them they are temporarily mapped.
+ */
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+                                              unsigned long end)
+{
+       unsigned long last_map_addr;
+       unsigned long page_size_mask = 0;
+
+       printk(KERN_INFO "init_memory_mapping\n");
+
+       /*
+        * Find space for the kernel direct mapping tables.
+        *
+        * Later we should allocate these tables in the local node of the
+        * memory mapped. Unfortunately this is done currently before the
+        * nodes are discovered.
+        */
+       if (!after_bootmem) {
+               init_gbpages();
+               find_early_table_space(end);
+       }
+
+       if (direct_gbpages)
+               page_size_mask |= 1 << PG_LEVEL_1G;
+       if (cpu_has_pse)
+               page_size_mask |= 1 << PG_LEVEL_2M;
+
+       last_map_addr = kernel_physical_mapping_init(start, end,
+                                                        page_size_mask);
+
        if (!after_bootmem)
                mmu_cr4_features = read_cr4();
        __flush_tlb_all();
 
-       if (!after_bootmem)
+       if (!after_bootmem && table_end > table_start)
                reserve_early(table_start << PAGE_SHIFT,
                                 table_end << PAGE_SHIFT, "PGTABLE");
 
+       printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
+                        last_map_addr, end);
+
        if (!after_bootmem)
-               early_memtest(start_phys, end_phys);
+               early_memtest(start, end);
 
        return last_map_addr >> PAGE_SHIFT;
 }
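
Side note on the first hunk: the new __init_extra_mapping() helper maps a physical range with 2MB (PMD-level) pages, stepping through it PMD_SIZE at a time, which is why it BUG()s on a phys or size that is not 2MB aligned. A standalone sketch of that walk, using the x86-64 PMD constants (illustrative only, not kernel code):

/* Sketch: walk a physical range in 2MB steps, as __init_extra_mapping() does. */
#include <stdio.h>

#define PMD_SHIFT       21
#define PMD_SIZE        (1UL << PMD_SHIFT)      /* 2MB per large PMD entry */
#define PMD_MASK        (~(PMD_SIZE - 1))

int main(void)
{
        unsigned long phys = 0x40000000UL;      /* must be 2MB aligned */
        unsigned long size = 4 * PMD_SIZE;      /* must be a multiple of 2MB */

        if ((phys & ~PMD_MASK) || (size & ~PMD_MASK)) {
                fprintf(stderr, "unaligned extra mapping request\n");
                return 1;
        }

        for (; size; phys += PMD_SIZE, size -= PMD_SIZE)
                printf("map 2MB page at phys %#lx\n", phys);

        return 0;
}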