#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
-#include <asm/sstate.h>
#include <asm/mdesc.h>
#include <asm/cpudata.h>
+#include <asm/irq.h>
-#define MAX_PHYS_ADDRESS (1UL << 42UL)
-#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
-#define KPTE_BITMAP_BYTES \
- ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
+#include "init.h"
unsigned long kern_linear_pte_xor[2] __read_mostly;
#endif /* CONFIG_DEBUG_DCFLUSH */
}
-struct linux_prom_translation {
- unsigned long virt;
- unsigned long size;
- unsigned long data;
-};
-
-/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;
-/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;
/* The obp translations are saved based on 8k pagesize, since obp can
start += PAGE_SIZE;
}
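+ /* Clamp so we never return an address beyond the end of the range. */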
+ if (start > end)
+ start = end;
+
return start;
}
#else
int count, nid;
u64 grp;
+ /* This is the right thing to do on currently supported
+ * SUN4U NUMA platforms as well, as the PCI controller does
+ * not sit behind any particular memory controller.
+ */
if (!mlgroups)
return -1;
return err;
}
+static int __init numa_parse_jbus(void)
+{
+ unsigned long cpu, index;
+
+ /* NUMA node id is encoded in bits 36 and higher, and there is
+ * a 1-to-1 mapping from CPU ID to NUMA node ID.
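+ * For example, cpu 1 covers physical addresses 0x1000000000 - 0x1fffffffff.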
+ */
+ index = 0;
+ for_each_present_cpu(cpu) {
+ numa_cpu_lookup_table[cpu] = index;
+ numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
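+ /* A physical address belongs to this node when
+ * (paddr & mask) == val.
+ */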
+ node_masks[index].mask = ~((1UL << 36UL) - 1UL);
+ node_masks[index].val = cpu << 36UL;
+
+ index++;
+ }
+ num_node_masks = index;
+
+ add_node_ranges();
+
+ for (index = 0; index < num_node_masks; index++) {
+ allocate_node_data(index);
+ node_set_online(index);
+ }
+
+ return 0;
+}
+
static int __init numa_parse_sun4u(void)
{
+ if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+ unsigned long ver;
+
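+ /* The upper 32 bits of %ver hold the cpu manufacturer and
+ * implementation codes; Jalapeno and Serrano are the
+ * JBUS-based parts.
+ */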
+ __asm__ ("rdpr %%ver, %0" : "=r" (ver));
+ if ((ver >> 32UL) == __JALAPENO_ID ||
+ (ver >> 32UL) == __SERRANO_ID)
+ return numa_parse_jbus();
+ }
return -1;
}
/* paging_init() sets up the page tables */
-extern void central_probe(void);
-
static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];
kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
- sstate_booting();
-
/* Invalidate both kernel TSBs. */
memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
find_ramdisk(phys_base);
- if (cmdline_memory_size)
- lmb_enforce_memory_limit(phys_base + cmdline_memory_size);
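+ /* The limit passed to lmb_enforce_memory_limit() is a size,
+ * not an end address.
+ */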
+ lmb_enforce_memory_limit(cmdline_memory_size);
lmb_analyze();
lmb_dump_all();
if (tlb_type == hypervisor)
sun4v_mdesc_init();
+ /* Once the OF device tree and MDESC have been set up, we
+ * know the list of possible cpus, so we can allocate the
+ * IRQ stacks.
+ */
+ for_each_possible_cpu(i) {
+ /* XXX Use node local allocations... XXX */
+ softirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ hardirq_stack[i] = __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+ }
+
/* Setup bootmem... */
last_valid_pfn = end_pfn = bootmem_init(phys_base);
}
printk("Booting Linux...\n");
-
- central_probe();
- cpu_probe();
}
int __init page_in_phys_avail(unsigned long paddr)
* memory list again, and make sure it provides at least as much
* memory as 'pavail' does.
*/
-static void setup_valid_addr_bitmap_from_pavail(void)
+static void __init setup_valid_addr_bitmap_from_pavail(void)
{
int i;
void free_initmem(void)
{
unsigned long addr, initend;
+ int do_free = 1;
+
+ /* If the physical memory maps were trimmed by kernel command
+ * line options, don't even try to free the initmem. The
+ * kernel image could lie in the trimmed-out region, and if so
+ * the freeing below would free invalid page structs.
+ */
+ if (cmdline_memory_size)
+ do_free = 0;
/*
* The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
((unsigned long) __va(kern_base)) -
((unsigned long) KERNBASE));
memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
- p = virt_to_page(page);
- ClearPageReserved(p);
- init_page_count(p);
- __free_page(p);
- num_physpages++;
- totalram_pages++;
+ if (do_free) {
+ p = virt_to_page(page);
+
+ ClearPageReserved(p);
+ init_page_count(p);
+ __free_page(p);
+ num_physpages++;
+ totalram_pages++;
+ }
}
}
pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);
-pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;
unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-
-#define VMEMMAP_CHUNK_SHIFT 22
-#define VMEMMAP_CHUNK (1UL << VMEMMAP_CHUNK_SHIFT)
-#define VMEMMAP_CHUNK_MASK ~(VMEMMAP_CHUNK - 1UL)
-#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
-
-#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
- sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
unsigned long vmemmap_table[VMEMMAP_SIZE];
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
_PAGE_CACHE_4U | _PAGE_P_4U |
__ACCESS_BITS_4U | __DIRTY_BITS_4U |
_PAGE_EXEC_4U | _PAGE_L_4U);
- PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
_PAGE_IE = _PAGE_IE_4U;
_PAGE_E = _PAGE_E_4U;
#ifdef CONFIG_DEBUG_PAGEALLOC
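+ /* XOR-ing a linear mapping virtual address with this value
+ * strips PAGE_OFFSET (0xfffff80000000000) and sets the pte
+ * bits, letting the TLB miss handler compute the TTE
+ * directly from the faulting address.
+ */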
kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
- 0xfffff80000000000;
+ 0xfffff80000000000UL;
#else
kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
- 0xfffff80000000000;
+ 0xfffff80000000000UL;
#endif
kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
_PAGE_P_4U | _PAGE_W_4U);
__ACCESS_BITS_4V | __DIRTY_BITS_4V |
_PAGE_EXEC_4V);
PAGE_KERNEL_LOCKED = PAGE_KERNEL;
- PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
_PAGE_IE = _PAGE_IE_4V;
_PAGE_E = _PAGE_E_4V;
#ifdef CONFIG_DEBUG_PAGEALLOC
kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
- 0xfffff80000000000;
+ 0xfffff80000000000UL;
#else
kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
- 0xfffff80000000000;
+ 0xfffff80000000000UL;
#endif
kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
_PAGE_P_4V | _PAGE_W_4V);
#ifdef CONFIG_DEBUG_PAGEALLOC
kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
- 0xfffff80000000000;
+ 0xfffff80000000000UL;
#else
kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
- 0xfffff80000000000;
+ 0xfffff80000000000UL;
#endif
kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
_PAGE_P_4V | _PAGE_W_4V);