X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=mm%2Fpage_alloc.c;h=b2838c24e582c11b8e0680ca5d5b5f97389145ba;hb=d69a3ad6a0e47b2aa9b2b2ddfd385752132a4d34;hp=43f757fcf30f6b1dc61cffd2c824e791db28c418;hpb=e6d5a11dad44b8ae18ca8fc4ecb72ccccfa0a2d2;p=linux-2.6-omap-h63xx.git

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 43f757fcf30..b2838c24e58 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -123,7 +123,7 @@ static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
   /*
-   * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct
+   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
    * ranges of memory (RAM) that may be registered with add_active_range().
    * Ranges passed to add_active_range() will be merged if possible
    * so the number of times add_active_range() can be called is
@@ -305,7 +305,6 @@ static inline void prep_zero_page(struct page *page, int order,
 			gfp_t gfp_flags)
 {
 	int i;
-	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
 	/*
 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
@@ -749,23 +748,6 @@ int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
 	return move_freepages(zone, start_page, end_page, migratetype);
 }
 
-/* Return the page with the lowest PFN in the list */
-static struct page *min_page(struct list_head *list)
-{
-	unsigned long min_pfn = -1UL;
-	struct page *min_page = NULL, *page;;
-
-	list_for_each_entry(page, list, lru) {
-		unsigned long pfn = page_to_pfn(page);
-		if (pfn < min_pfn) {
-			min_pfn = pfn;
-			min_page = page;
-		}
-	}
-
-	return min_page;
-}
-
 /* Remove an element from the buddy allocator from the fallback list */
 static struct page *__rmqueue_fallback(struct zone *zone, int order,
 						int start_migratetype)
@@ -789,11 +771,8 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 		if (list_empty(&area->free_list[migratetype]))
 			continue;
 
-		/* Bias kernel allocations towards low pfns */
 		page = list_entry(area->free_list[migratetype].next,
 					struct page, lru);
-		if (unlikely(start_migratetype != MIGRATE_MOVABLE))
-			page = min_page(&area->free_list[migratetype]);
 		area->nr_free--;
 
 		/*
@@ -868,8 +847,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
 			break;
+
+		/*
+		 * Split buddy pages returned by expand() are received here
+		 * in physical page order. The page is added to the caller's
+		 * list and the list head then moves forward. From the caller's
+		 * perspective, the linked list is ordered by page number in
+		 * some conditions. This is useful for IO devices that can
+		 * merge IO requests if the physical pages are ordered
+		 * properly.
+		 */
 		list_add(&page->lru, list);
 		set_page_private(page, migratetype);
+		list = &page->lru;
 	}
 	spin_unlock(&zone->lock);
 	return i;
@@ -1260,7 +1250,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
  * skip over zones that are not allowed by the cpuset, or that have
  * been recently (in last second) found to be nearly full. See further
  * comments in mmzone.h. Reduces cache footprint of zonelist scans
- * that have to skip over alot of full or unallowed zones.
+ * that have to skip over a lot of full or unallowed zones.
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
@@ -2358,7 +2348,7 @@ void build_all_zonelists(void)
 		__build_all_zonelists(NULL);
 		cpuset_init_current_mems_allowed();
 	} else {
-		/* we have to stop all cpus to guaranntee there is no user
+		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
 		/* cpuset refresh routine should be here */
@@ -2576,7 +2566,7 @@ static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
 	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __devinit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
 {
 	int batch;
 
@@ -2864,7 +2854,7 @@ static int __meminit first_active_region_index_in_nid(int nid)
 
 /*
  * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardles of node
+ * Note: nid == MAX_NUMNODES returns next region regardless of node
  */
 static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
@@ -3286,6 +3276,16 @@ static void inline setup_usemap(struct pglist_data *pgdat,
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+/* Return a sensible default order for the pageblock size. */
+static inline int pageblock_default_order(void)
+{
+	if (HPAGE_SHIFT > PAGE_SHIFT)
+		return HUGETLB_PAGE_ORDER;
+
+	return MAX_ORDER-1;
+}
+
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
 static inline void __init set_pageblock_order(unsigned int order)
 {
@@ -3301,7 +3301,16 @@ static inline void __init set_pageblock_order(unsigned int order)
 }
 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
-/* Defined this way to avoid accidently referencing HUGETLB_PAGE_ORDER */
+/*
+ * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
+ * and pageblock_default_order() are unused as pageblock_order is set
+ * at compile-time. See include/linux/pageblock-flags.h for the values of
+ * pageblock_order based on the kernel config
+ */
+static inline int pageblock_default_order(unsigned int order)
+{
+	return MAX_ORDER-1;
+}
 #define set_pageblock_order(x)	do {} while (0)
 
 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -3386,7 +3395,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
-		set_pageblock_order(HUGETLB_PAGE_ORDER);
+		set_pageblock_order(pageblock_default_order());
 		setup_usemap(pgdat, zone, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
@@ -3429,7 +3438,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 		mem_map = NODE_DATA(0)->node_mem_map;
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
-			mem_map -= pgdat->node_start_pfn;
+			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 	}
 #endif