#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
+#include <asm/sections.h>
#include <asm/cache.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
-void (*copy_page)(void *from, void *to);
-void (*clear_page)(void *to);
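+/*
+ * Offset to add to a cached (P1) address to reach its uncached (P2)
+ * alias; established in paging_init().
+ */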
+unsigned long cached_to_uncached = 0;
void show_mem(void)
{
show_free_areas();
for_each_online_pgdat(pgdat) {
- struct page *page, *end;
- unsigned long flags;
+ unsigned long flags, i;
pgdat_resize_lock(pgdat, &flags);
- page = pgdat->node_mem_map;
- end = page + pgdat->node_spanned_pages;
-
- do {
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat_page_nr(pgdat, i);
total++;
if (PageReserved(page))
reserved++;
else if (!page_count(page))
free++;
else
shared += page_count(page) - 1;
- page++;
- } while (page < end);
-
+ }
pgdat_resize_unlock(pgdat, &flags);
}
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
- flush_tlb_one(get_asid(), addr);
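+ /* Skip the flush until cached_to_uncached has been set up by paging_init() */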
+ if (cached_to_uncached)
+ flush_tlb_one(get_asid(), addr);
}
/*
set_pte_phys(address, phys, prot);
}
-#endif /* CONFIG_MMU */
-
-/* References to section boundaries */
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
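+/*
+ * Pre-allocate pte tables for the kernel range [start, end), walking it
+ * one pmd at a time and populating any missing entries with zeroed pages
+ * from bootmem, so that individual ptes can be installed later (e.g. by
+ * __set_fixmap()) without needing an allocator.
+ */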
+void __init page_table_range_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ int pgd_idx;
+ unsigned long vaddr;
+
+ vaddr = start & PMD_MASK;
+ end = (end + PMD_SIZE - 1) & PMD_MASK;
+ pgd_idx = pgd_index(vaddr);
+ pgd = pgd_base + pgd_idx;
+
+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
+ BUG_ON(pgd_none(*pgd));
+ pud = pud_offset(pgd, 0);
+ BUG_ON(pud_none(*pud));
+ pmd = pmd_offset(pud, 0);
+
+ if (!pmd_present(*pmd)) {
+ pte_t *pte_table;
+ pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ memset(pte_table, 0, PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, pte_table);
+ }
+
+ vaddr += PMD_SIZE;
+ }
+}
+#endif /* CONFIG_MMU */
/*
* paging_init() sets up the page tables
*/
void __init paging_init(void)
{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
int nid;
/* We don't need to map the kernel through the TLB, as
 * it is permanently mapped using P1. So clear the
 * entire pgd. */
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
/* Set an initial value for the MMU.TTB so we don't have to
 * check for a null value. */
set_TTB(swapper_pg_dir);
+ /* Populate the relevant portions of swapper_pg_dir so that
+ * we can use the fixmap entries without calling kmalloc.
+ * pte's will be filled in by __set_fixmap(). */
+ page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);
+
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
- unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long low, start_pfn;
- memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-
start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
low = pgdat->bdata->node_low_pfn;
- max_zone_pfns[ZONE_NORMAL] = low;
- add_active_range(nid, start_pfn, low);
+ if (max_zone_pfns[ZONE_NORMAL] < low)
+ max_zone_pfns[ZONE_NORMAL] = low;
printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
nid, start_pfn, low);
- free_area_init_nodes(max_zone_pfns);
- printk("Node %u: mem_map starts at %p\n",
- pgdat->node_id, pgdat->node_mem_map);
- }
+ }
+ free_area_init_nodes(max_zone_pfns);
+#ifdef CONFIG_SUPERH32
+ /* Set up the uncached fixmap */
+ set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
+
+#ifdef CONFIG_29BIT
+ /*
+ * Handle trivial transitions between cached and uncached
+ * segments, making use of the 1:1 mapping relationship in
+ * 512MB lowmem.
+ */
+ cached_to_uncached = P2SEG - P1SEG;
+#endif
+#endif
}
static struct kcore_list kcore_mem, kcore_vmalloc;
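+/* Nonzero once mem_init() has completed */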
+int after_bootmem = 0;
void __init mem_init(void)
{
- int codesize, reservedpages, datasize, initsize;
+ int codesize, datasize, initsize;
int nid;
- reservedpages = 0;
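+ /* Recompute the global totals from scratch; each online node is accumulated below */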
+ num_physpages = 0;
+ high_memory = NULL;
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long node_pages = 0;
void *node_high_memory;
- int i;
num_physpages += pgdat->node_present_pages;
totalram_pages += node_pages;
- for (i = 0; i < node_pages; i++)
- if (PageReserved(pgdat->node_mem_map + i))
- reservedpages++;
-
- node_high_memory = (void *)((pgdat->node_start_pfn +
- pgdat->node_spanned_pages) <<
- PAGE_SHIFT);
+ node_high_memory = (void *)__va((pgdat->node_start_pfn +
+ pgdat->node_spanned_pages) <<
+ PAGE_SHIFT);
if (node_high_memory > high_memory)
high_memory = node_high_memory;
}
memset(empty_zero_page, 0, PAGE_SIZE);
__flush_wback_region(empty_zero_page, PAGE_SIZE);
- /*
- * Setup wrappers for copy/clear_page(), these will get overridden
- * later in the boot process if a better method is available.
- */
-#ifdef CONFIG_MMU
- copy_page = copy_page_slow;
- clear_page = clear_page_slow;
-#else
- copy_page = copy_page_nommu;
- clear_page = clear_page_nommu;
-#endif
+ after_bootmem = 1;
codesize = (unsigned long) &_etext - (unsigned long) &_text;
datasize = (unsigned long) &_edata - (unsigned long) &_etext;
VMALLOC_END - VMALLOC_START);
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
- "%dk reserved, %dk data, %dk init)\n",
+ "%dk data, %dk init)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- totalram_pages << (PAGE_SHIFT-10),
+ num_physpages << (PAGE_SHIFT-10),
codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10);
free_page(addr);
totalram_pages++;
}
- printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+ printk("Freeing unused kernel memory: %ldk freed\n",
+ ((unsigned long)&__init_end -
+ (unsigned long)&__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
free_page(p);
totalram_pages++;
}
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
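+/*
+ * Hand a hot-added page to the buddy allocator: clear its reserved state,
+ * give it an initial reference count and update the global page counts.
+ */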
+void online_page(struct page *page)
+{
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ totalram_pages++;
+ num_physpages++;
+}
+
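+/*
+ * Arch hook for memory hot-add: register the new pfn range with the node's
+ * ZONE_NORMAL. The pages themselves are brought online separately via
+ * online_page().
+ */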
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+ pg_data_t *pgdat;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
+ pgdat = NODE_DATA(nid);
+
+ /* We only have ZONE_NORMAL, so this is easy.. */
+ ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
+ if (unlikely(ret))
+ printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arch_add_memory);
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 addr)
+{
+ /* Node 0 for now.. */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+#endif