pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - arch/sparc64/mm/init.c
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
[linux-2.6-omap-h63xx.git] / arch / sparc64 / mm / init.c
index 217de3ea29e842cff4c0eff6be2b82c5a13f25fe..3c10daf8fc015799a300ffc86d3d014b8f7ae5ee 100644 (file)
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
 #include <asm/prom.h>
-#include <asm/sstate.h>
 #include <asm/mdesc.h>
 #include <asm/cpudata.h>
 #include <asm/irq.h>
 
-#define MAX_PHYS_ADDRESS       (1UL << 42UL)
-#define KPTE_BITMAP_CHUNK_SZ   (256UL * 1024UL * 1024UL)
-#define KPTE_BITMAP_BYTES      \
-       ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
+#include "init.h"
 
 unsigned long kern_linear_pte_xor[2] __read_mostly;
 
@@ -416,17 +412,9 @@ void mmu_info(struct seq_file *m)
 #endif /* CONFIG_DEBUG_DCFLUSH */
 }
 
-struct linux_prom_translation {
-       unsigned long virt;
-       unsigned long size;
-       unsigned long data;
-};
-
-/* Exported for kernel TLB miss handling in ktlb.S */
 struct linux_prom_translation prom_trans[512] __read_mostly;
 unsigned int prom_trans_ents __read_mostly;
 
-/* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
 /* The obp translations are saved based on 8k pagesize, since obp can
@@ -796,6 +784,9 @@ static unsigned long nid_range(unsigned long start, unsigned long end,
                start += PAGE_SIZE;
        }
 
+       if (start > end)
+               start = end;
+
        return start;
 }
 #else
@@ -935,6 +926,10 @@ int of_node_to_nid(struct device_node *dp)
        int count, nid;
        u64 grp;
 
+       /* This is the right thing to do on currently supported
+        * SUN4U NUMA platforms as well, as the PCI controller does
+        * not sit behind any particular memory controller.
+        */
        if (!mlgroups)
                return -1;
 
@@ -1203,8 +1198,44 @@ out:
        return err;
 }
 
+static int __init numa_parse_jbus(void)
+{
+       unsigned long cpu, index;
+
+       /* NUMA node id is encoded in bits 36 and higher, and there is
+        * a 1-to-1 mapping from CPU ID to NUMA node ID.
+        */
+       index = 0;
+       for_each_present_cpu(cpu) {
+               numa_cpu_lookup_table[cpu] = index;
+               numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
+               node_masks[index].mask = ~((1UL << 36UL) - 1UL);
+               node_masks[index].val = cpu << 36UL;
+
+               index++;
+       }
+       num_node_masks = index;
+
+       add_node_ranges();
+
+       for (index = 0; index < num_node_masks; index++) {
+               allocate_node_data(index);
+               node_set_online(index);
+       }
+
+       return 0;
+}
+
 static int __init numa_parse_sun4u(void)
 {
+       if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+               unsigned long ver;
+
+               __asm__ ("rdpr %%ver, %0" : "=r" (ver));
+               if ((ver >> 32UL) == __JALAPENO_ID ||
+                   (ver >> 32UL) == __SERRANO_ID)
+                       return numa_parse_jbus();
+       }
        return -1;
 }
 
@@ -1630,8 +1661,6 @@ void __cpuinit sun4v_ktsb_register(void)
 
 /* paging_init() sets up the page tables */
 
-extern void central_probe(void);
-
 static unsigned long last_valid_pfn;
 pgd_t swapper_pg_dir[2048];
 
@@ -1676,8 +1705,6 @@ void __init paging_init(void)
        kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
        kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
-       sstate_booting();
-
        /* Invalidate both kernel TSBs.  */
        memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
 #ifndef CONFIG_DEBUG_PAGEALLOC
@@ -1723,8 +1750,7 @@ void __init paging_init(void)
 
        find_ramdisk(phys_base);
 
-       if (cmdline_memory_size)
-               lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
+       lmb_enforce_memory_limit(cmdline_memory_size);
 
        lmb_analyze();
        lmb_dump_all();
@@ -1801,9 +1827,6 @@ void __init paging_init(void)
        }
 
        printk("Booting Linux...\n");
-
-       central_probe();
-       cpu_probe();
 }
 
 int __init page_in_phys_avail(unsigned long paddr)
@@ -1841,7 +1864,7 @@ static int pavail_rescan_ents __initdata;
  * memory list again, and make sure it provides at least as much
  * memory as 'pavail' does.
  */
-static void setup_valid_addr_bitmap_from_pavail(void)
+static void __init setup_valid_addr_bitmap_from_pavail(void)
 {
        int i;
 
@@ -1961,6 +1984,15 @@ void __init mem_init(void)
 void free_initmem(void)
 {
        unsigned long addr, initend;
+       int do_free = 1;
+
+       /* If the physical memory maps were trimmed by kernel command
+        * line options, don't even try freeing this initmem stuff up.
+        * The kernel image could have been in the trimmed out region
+        * and if so the freeing below will free invalid page structs.
+        */
+       if (cmdline_memory_size)
+               do_free = 0;
 
        /*
         * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
@@ -1975,13 +2007,16 @@ void free_initmem(void)
                        ((unsigned long) __va(kern_base)) -
                        ((unsigned long) KERNBASE));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-               p = virt_to_page(page);
 
-               ClearPageReserved(p);
-               init_page_count(p);
-               __free_page(p);
-               num_physpages++;
-               totalram_pages++;
+               if (do_free) {
+                       p = virt_to_page(page);
+
+                       ClearPageReserved(p);
+                       init_page_count(p);
+                       __free_page(p);
+                       num_physpages++;
+                       totalram_pages++;
+               }
        }
 }
 
@@ -2018,7 +2053,6 @@ pgprot_t PAGE_COPY __read_mostly;
 pgprot_t PAGE_SHARED __read_mostly;
 EXPORT_SYMBOL(PAGE_SHARED);
 
-pgprot_t PAGE_EXEC __read_mostly;
 unsigned long pg_iobits __read_mostly;
 
 unsigned long _PAGE_IE __read_mostly;
@@ -2031,14 +2065,6 @@ unsigned long _PAGE_CACHE __read_mostly;
 EXPORT_SYMBOL(_PAGE_CACHE);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-
-#define VMEMMAP_CHUNK_SHIFT    22
-#define VMEMMAP_CHUNK          (1UL << VMEMMAP_CHUNK_SHIFT)
-#define VMEMMAP_CHUNK_MASK     ~(VMEMMAP_CHUNK - 1UL)
-#define VMEMMAP_ALIGN(x)       (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
-
-#define VMEMMAP_SIZE   ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
-                         sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
 unsigned long vmemmap_table[VMEMMAP_SIZE];
 
 int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
@@ -2122,7 +2148,6 @@ static void __init sun4u_pgprot_init(void)
                                       _PAGE_CACHE_4U | _PAGE_P_4U |
                                       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
                                       _PAGE_EXEC_4U | _PAGE_L_4U);
-       PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
 
        _PAGE_IE = _PAGE_IE_4U;
        _PAGE_E = _PAGE_E_4U;
@@ -2133,10 +2158,10 @@ static void __init sun4u_pgprot_init(void)
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
-               0xfffff80000000000;
+               0xfffff80000000000UL;
 #else
        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
-               0xfffff80000000000;
+               0xfffff80000000000UL;
 #endif
        kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
                                   _PAGE_P_4U | _PAGE_W_4U);
@@ -2174,7 +2199,6 @@ static void __init sun4v_pgprot_init(void)
                                __ACCESS_BITS_4V | __DIRTY_BITS_4V |
                                _PAGE_EXEC_4V);
        PAGE_KERNEL_LOCKED = PAGE_KERNEL;
-       PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
 
        _PAGE_IE = _PAGE_IE_4V;
        _PAGE_E = _PAGE_E_4V;
@@ -2182,20 +2206,20 @@ static void __init sun4v_pgprot_init(void)
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
-               0xfffff80000000000;
+               0xfffff80000000000UL;
 #else
        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
-               0xfffff80000000000;
+               0xfffff80000000000UL;
 #endif
        kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
                                   _PAGE_P_4V | _PAGE_W_4V);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
        kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
-               0xfffff80000000000;
+               0xfffff80000000000UL;
 #else
        kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
-               0xfffff80000000000;
+               0xfffff80000000000UL;
 #endif
        kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
                                   _PAGE_P_4V | _PAGE_W_4V);