X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=mm%2Fpage_alloc.c;h=4f59d90b81e65a314e0433dc61371b785b3df61f;hb=f0b364a13d7fba001c0571d954f165dfaf5f434a;hp=75133e1dc4b1c624f2a813f9cfeb9e0305ecf407;hpb=9c7cd6877cf8db15269163deda69392263124c1e;p=linux-2.6-omap-h63xx.git

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75133e1dc4b..4f59d90b81e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -131,6 +131,10 @@ static unsigned long __initdata dma_reserve;
   int __initdata nr_nodemap_entries;
   unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
   unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+  unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
+  unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
+#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 #ifdef CONFIG_DEBUG_VM
@@ -938,7 +942,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 	 */
 	do {
 		zone = *z;
-		if (unlikely((gfp_mask & __GFP_THISNODE) &&
+		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
 			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
 				break;
 		if ((alloc_flags & ALLOC_CPUSET) &&
@@ -1252,14 +1256,12 @@ unsigned int nr_free_pagecache_pages(void)
 {
 	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
 }
-#ifdef CONFIG_NUMA
-static void show_node(struct zone *zone)
+
+static inline void show_node(struct zone *zone)
 {
-	printk("Node %ld ", zone_to_nid(zone));
+	if (NUMA_BUILD)
+		printk("Node %ld ", zone_to_nid(zone));
 }
-#else
-#define show_node(zone) do { } while (0)
-#endif
 
 void si_meminfo(struct sysinfo *val)
 {
@@ -1301,34 +1303,30 @@ void si_meminfo_node(struct sysinfo *val, int nid)
  */
 void show_free_areas(void)
 {
-	int cpu, temperature;
+	int cpu;
 	unsigned long active;
 	unsigned long inactive;
 	unsigned long free;
 	struct zone *zone;
 
 	for_each_zone(zone) {
-		show_node(zone);
-		printk("%s per-cpu:", zone->name);
-
-		if (!populated_zone(zone)) {
-			printk(" empty\n");
+		if (!populated_zone(zone))
 			continue;
-		} else
-			printk("\n");
+
+		show_node(zone);
+		printk("%s per-cpu:\n", zone->name);
 
 		for_each_online_cpu(cpu) {
 			struct per_cpu_pageset *pageset;
 
 			pageset = zone_pcp(zone, cpu);
 
-			for (temperature = 0; temperature < 2; temperature++)
-				printk("cpu %d %s: high %d, batch %d used:%d\n",
-					cpu,
-					temperature ? "cold" : "hot",
-					pageset->pcp[temperature].high,
-					pageset->pcp[temperature].batch,
-					pageset->pcp[temperature].count);
+			printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d "
+				"Cold: hi:%5d, btch:%4d usd:%4d\n",
+				cpu, pageset->pcp[0].high,
+				pageset->pcp[0].batch, pageset->pcp[0].count,
+				pageset->pcp[1].high, pageset->pcp[1].batch,
+				pageset->pcp[1].count);
 		}
 	}
 
@@ -1350,6 +1348,9 @@ void show_free_areas(void)
 	for_each_zone(zone) {
 		int i;
 
+		if (!populated_zone(zone))
+			continue;
+
 		show_node(zone);
 		printk("%s"
 			" free:%lukB"
@@ -1382,12 +1383,11 @@ void show_free_areas(void)
 	for_each_zone(zone) {
 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
+		if (!populated_zone(zone))
+			continue;
+
 		show_node(zone);
 		printk("%s: ", zone->name);
-		if (!populated_zone(zone)) {
-			printk("empty\n");
-			continue;
-		}
 
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
@@ -1591,7 +1591,7 @@ static int __meminit __build_all_zonelists(void *dummy)
 void __meminit build_all_zonelists(void)
 {
 	if (system_state == SYSTEM_BOOTING) {
-		__build_all_zonelists(0);
+		__build_all_zonelists(NULL);
 		cpuset_init_current_mems_allowed();
 	} else {
 		/* we have to stop all cpus to guaranntee there is no user
@@ -1829,6 +1829,9 @@ static int __cpuinit process_zones(int cpu)
 
 	for_each_zone(zone) {
 
+		if (!populated_zone(zone))
+			continue;
+
 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
 					 GFP_KERNEL, cpu_to_node(cpu));
 		if (!zone_pcp(zone, cpu))
@@ -2094,6 +2097,62 @@ void __init sparse_memory_present_with_active_regions(int nid)
 			early_node_map[i].end_pfn);
 }
 
+/**
+ * push_node_boundaries - Push node boundaries to at least the requested boundary
+ * @nid: The nid of the node to push the boundary for
+ * @start_pfn: The start pfn of the node
+ * @end_pfn: The end pfn of the node
+ *
+ * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
+ * time. Specifically, on x86_64, SRAT will report ranges that can potentially
+ * be hotplugged even though no physical memory exists. This function allows
+ * an arch to push out the node boundaries so mem_map is allocated that can
+ * be used later.
+ */
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+void __init push_node_boundaries(unsigned int nid,
+		unsigned long start_pfn, unsigned long end_pfn)
+{
+	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+			nid, start_pfn, end_pfn);
+
+	/* Initialise the boundary for this node if necessary */
+	if (node_boundary_end_pfn[nid] == 0)
+		node_boundary_start_pfn[nid] = -1UL;
+
+	/* Update the boundaries */
+	if (node_boundary_start_pfn[nid] > start_pfn)
+		node_boundary_start_pfn[nid] = start_pfn;
+	if (node_boundary_end_pfn[nid] < end_pfn)
+		node_boundary_end_pfn[nid] = end_pfn;
+}
+
+/* If necessary, push the node boundary out for reserve hotadd */
+static void __init account_node_boundary(unsigned int nid,
+		unsigned long *start_pfn, unsigned long *end_pfn)
+{
+	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+			nid, *start_pfn, *end_pfn);
+
+	/* Return if boundary information has not been provided */
+	if (node_boundary_end_pfn[nid] == 0)
+		return;
+
+	/* Check the boundaries and update if necessary */
+	if (node_boundary_start_pfn[nid] < *start_pfn)
+		*start_pfn = node_boundary_start_pfn[nid];
+	if (node_boundary_end_pfn[nid] > *end_pfn)
+		*end_pfn = node_boundary_end_pfn[nid];
+}
+#else
+void __init push_node_boundaries(unsigned int nid,
+		unsigned long start_pfn, unsigned long end_pfn) {}
+
+static void __init account_node_boundary(unsigned int nid,
+		unsigned long *start_pfn, unsigned long *end_pfn) {}
+#endif
+
+
 /**
  * get_pfn_range_for_nid - Return the start and end page frames for a node
  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
@@ -2121,6 +2180,9 @@ void __init get_pfn_range_for_nid(unsigned int nid,
 		printk(KERN_WARNING "Node %u active with no memory\n", nid);
 		*start_pfn = 0;
 	}
+
+	/* Push the node boundaries out if requested */
+	account_node_boundary(nid, start_pfn, end_pfn);
 }
 
 /*
@@ -2346,6 +2408,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
 #ifdef CONFIG_NUMA
+		zone->node = nid;
 		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
 						/ 100;
 		zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
@@ -2527,6 +2590,10 @@ void __init remove_all_active_ranges()
 {
 	memset(early_node_map, 0, sizeof(early_node_map));
 	nr_nodemap_entries = 0;
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
+	memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
+	memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
+#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 }
 
 /* Compare two active node_active_regions */