[WATCHDOG] Merge code clean-ups from Alan Cox.
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8d3b58ebd38e62d737f2f10aa773406d7c0c6473..5ce5a4dcd00823c17447737f18ad5ad03c820185 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -68,6 +68,7 @@
 
 #define KB (1024)
 #define MB (1024*KB)
+#define GB (1024L*MB)
 
 /*
  * Note:  pte   --> Linux PTE
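
Editor's note: the new GB constant carries an L suffix so that multiples of it are computed in 64-bit long arithmetic; without it, 16 * GB would overflow a 32-bit int on ppc64, where int is 32 bits but long is 64. A minimal standalone check (plain C, not kernel code; the printed value is just an illustration):

#include <stdio.h>

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)   /* the L keeps the product in 64-bit long */

int main(void)
{
	/* 16 * GB is 0x400000000; this only fits because GB is a long.
	 * With a plain int constant the multiplication would overflow. */
	unsigned long sixteen_g = 16 * GB;

	printf("16G = 0x%lX\n", sixteen_g);
	return 0;
}
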
@@ -102,7 +103,6 @@ int mmu_kernel_ssize = MMU_SEGSIZE_256M;
 int mmu_highuser_ssize = MMU_SEGSIZE_256M;
 u16 mmu_slb_size = 64;
 #ifdef CONFIG_HUGETLB_PAGE
-int mmu_huge_psize = MMU_PAGE_16M;
 unsigned int HPAGE_SHIFT;
 #endif
 #ifdef CONFIG_PPC_64K_PAGES
@@ -329,6 +329,45 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
        return 0;
 }
 
+/* Scan for 16G memory blocks that have been set aside for huge pages
+ * and reserve those blocks for 16G huge pages.
+ */
+static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
+                                       const char *uname, int depth,
+                                       void *data)
+{
+       char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+       unsigned long *addr_prop;
+       u32 *page_count_prop;
+       unsigned int expected_pages;
+       unsigned long phys_addr;
+       unsigned long block_size;
+
+       /* We are scanning "memory" nodes only */
+       if (type == NULL || strcmp(type, "memory") != 0)
+               return 0;
+
+       /* This property is the log base 2 of the number of virtual pages that
+        * will represent this memory block. */
+       page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
+       if (page_count_prop == NULL)
+               return 0;
+       expected_pages = (1 << page_count_prop[0]);
+       addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
+       if (addr_prop == NULL)
+               return 0;
+       phys_addr = addr_prop[0];
+       block_size = addr_prop[1];
+       if (block_size != (16 * GB))
+               return 0;
+       printk(KERN_INFO "Huge page (16GB) memory: "
+                       "addr = 0x%lX size = 0x%lX pages = %d\n",
+                       phys_addr, block_size, expected_pages);
+       lmb_reserve(phys_addr, block_size * expected_pages);
+       add_gpage(phys_addr, block_size, expected_pages);
+       return 0;
+}
+
 static void __init htab_init_page_sizes(void)
 {
        int rc;
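
Editor's note: of_scan_flat_dt() (hooked up in the next hunk) invokes this scanner once per flattened device-tree node; the scanner ignores everything except memory nodes whose reg size is exactly 16G. A standalone sketch of its arithmetic (plain C; the base address and page count are hypothetical, not from the source):

#include <stdio.h>

#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

int main(void)
{
	/* Hypothetical property values for one matching memory node:
	 * "reg" supplies { base, size }, and "ibm,expected#pages" holds
	 * the log base 2 of the number of 16G pages in the block. */
	unsigned long phys_addr  = 0x100000000UL;  /* addr_prop[0] */
	unsigned long block_size = 16 * GB;        /* addr_prop[1] */
	unsigned int expected_pages = 1u << 1;     /* page_count_prop[0] == 1 */

	if (block_size != 16 * GB)
		return 0;   /* the scanner skips anything that is not 16G */

	/* The kernel reserves the whole range up front, then hands each
	 * 16G page to the huge page pool (lmb_reserve + add_gpage). */
	printf("reserve 0x%lX bytes at 0x%lX for %u pages\n",
	       block_size * expected_pages, phys_addr, expected_pages);
	return 0;
}
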
@@ -418,15 +456,18 @@ static void __init htab_init_page_sizes(void)
               );
 
 #ifdef CONFIG_HUGETLB_PAGE
-       /* Init large page size. Currently, we pick 16M or 1M depending
+       /* Reserve 16G huge page memory sections for huge pages */
+       of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+
+       /* Set default large page size. Currently, we pick 16M or 1M depending
         * on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
-               set_huge_psize(MMU_PAGE_16M);
+               HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        /* With 4k/4level pagetables, we can't (for now) cope with a
         * huge page size < PMD_SIZE */
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
-               set_huge_psize(MMU_PAGE_1M);
+               HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
 #endif /* CONFIG_HUGETLB_PAGE */
 }
 
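
Editor's note: the selection above prefers 16M and falls back to 1M because, with 4k base pages and 4-level page tables, a huge page smaller than PMD_SIZE cannot be handled. A simplified standalone model of the fallback (the table shape and shift values are assumptions, not the kernel's actual mmu_psize_defs layout):

#include <stdio.h>

/* Simplified stand-in for the kernel's page-size table: a shift of 0
 * means the CPU does not support that page size at all. */
enum { MMU_PAGE_1M, MMU_PAGE_16M, MMU_PAGE_COUNT };

static const unsigned int psize_shift[MMU_PAGE_COUNT] = {
	[MMU_PAGE_1M]  = 20,
	[MMU_PAGE_16M] = 24,   /* hypothetical CPU with 16M support */
};

int main(void)
{
	unsigned int hpage_shift = 0;

	if (psize_shift[MMU_PAGE_16M])          /* prefer 16M */
		hpage_shift = psize_shift[MMU_PAGE_16M];
	else if (psize_shift[MMU_PAGE_1M])      /* 1M fallback */
		hpage_shift = psize_shift[MMU_PAGE_1M];

	printf("default huge page size: %lu bytes\n", 1UL << hpage_shift);
	return 0;
}
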
@@ -847,7 +888,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 #ifdef CONFIG_HUGETLB_PAGE
        /* Handle hugepage regions */
-       if (HPAGE_SHIFT && psize == mmu_huge_psize) {
+       if (HPAGE_SHIFT && mmu_huge_psizes[psize]) {
                DBG_LOW(" -> huge page !\n");
                return hash_huge_page(mm, access, ea, vsid, local, trap);
        }
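
Editor's note: replacing the single mmu_huge_psize with the mmu_huge_psizes[] array is what lets several huge page sizes (e.g. 16M and 16G) be enabled at once; hash_page() now tests membership rather than equality. A toy model of that check (names and contents are illustrative, not the kernel's):

#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_1M, MMU_PAGE_16M, MMU_PAGE_16G, MMU_PAGE_COUNT };

/* Toy stand-in for mmu_huge_psizes[]: a non-zero entry marks a page
 * size enabled for huge pages, so several sizes can be set at once. */
static const unsigned char huge_psizes[MMU_PAGE_COUNT] = {
	[MMU_PAGE_16M] = 1,
	[MMU_PAGE_16G] = 1,
};

int main(void)
{
	int psize;

	/* The old test (psize == mmu_huge_psize) admitted exactly one
	 * huge size; table membership admits any enabled size. */
	for (psize = 0; psize < MMU_PAGE_COUNT; psize++)
		printf("psize %d: %s\n", psize,
		       huge_psizes[psize] ? "huge" : "not huge");
	return 0;
}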