diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 12376ae3f7334e124d488b1d7bc3d291d64e9be1..37576b822f06c98c2d3342af1cdecbe0e5a8f25b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -305,7 +305,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 {
        int i;
 
-       VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
        /*
         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
         * and __GFP_HIGHMEM from hard or soft interrupt context.
@@ -538,7 +537,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
        if (order == 0) {
                __ClearPageReserved(page);
@@ -848,8 +847,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;
+
+               /*
+                * Split buddy pages returned by expand() are received here
+                * in physical page order. The page is added to the caller's
+                * list and the list head then moves forward. From the caller's
+                * perspective, the linked list is ordered by page number in
+                * some conditions. This is useful for IO devices that can
+                * merge IO requests if the physical pages are ordered
+                * properly.
+                */
                list_add(&page->lru, list);
                set_page_private(page, migratetype);
+               list = &page->lru;
        }
        spin_unlock(&zone->lock);
        return i;
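
The moving insertion point is the whole trick in the hunk above: each page
is linked in right after the previously added one, so a forward walk of the
pcp list visits pages in the order __rmqueue() handed them out. A minimal
user-space sketch of the same list discipline (fake_page is a hypothetical
stand-in; this is not kernel code):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Same semantics as the kernel's list_add(): insert 'new' after 'head'. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->prev = head;
	new->next = head->next;
	head->next->prev = new;
	head->next = new;
}

struct fake_page { unsigned long pfn; struct list_head lru; };

int main(void)
{
	struct fake_page pages[4] = { { 10 }, { 11 }, { 12 }, { 13 } };
	struct list_head pcp_list = { &pcp_list, &pcp_list };
	struct list_head *list = &pcp_list;	/* moving insertion point */
	struct list_head *pos;
	int i;

	for (i = 0; i < 4; i++) {
		list_add(&pages[i].lru, list);	/* as in rmqueue_bulk() */
		list = &pages[i].lru;		/* advance past the new entry */
	}

	/* Walking forward prints "10 11 12 13": physical page order. */
	for (pos = pcp_list.next; pos != &pcp_list; pos = pos->next) {
		struct fake_page *p = (struct fake_page *)
			((char *)pos - offsetof(struct fake_page, lru));
		printf("%lu ", p->pfn);
	}
	printf("\n");
	return 0;
}
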
@@ -880,31 +890,51 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 }
 #endif
 
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor (with the
+ * calling thread pinned to it) or a processor that is
+ * not online.
+ */
+static void drain_pages(unsigned int cpu)
 {
        unsigned long flags;
        struct zone *zone;
-       int i;
 
        for_each_zone(zone) {
                struct per_cpu_pageset *pset;
+               struct per_cpu_pages *pcp;
 
                if (!populated_zone(zone))
                        continue;
 
                pset = zone_pcp(zone, cpu);
-               for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-                       struct per_cpu_pages *pcp;
-
-                       pcp = &pset->pcp[i];
-                       local_irq_save(flags);
-                       free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-                       pcp->count = 0;
-                       local_irq_restore(flags);
-               }
+
+               pcp = &pset->pcp;
+               local_irq_save(flags);
+               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               pcp->count = 0;
+               local_irq_restore(flags);
        }
 }
 
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+       drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
+ */
+void drain_all_pages(void)
+{
+       on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
 #ifdef CONFIG_HIBERNATION
 
 void mark_free_pages(struct zone *zone)
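
A note on the two helpers just added: drain_local_pages() takes a void *arg
it ignores purely so its signature matches what on_each_cpu() expects (four
arguments in this kernel: function, info, nonatomic, wait). Since
on_each_cpu() with wait set runs the function on the calling CPU as well as
on all the others, the old drain_all_local_pages() sequence of draining
locally and then calling smp_call_function() for the remaining CPUs
collapses into the single call in drain_all_pages().
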
@@ -941,41 +971,10 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-/*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);  
-       __drain_pages(smp_processor_id());
-       local_irq_restore(flags);       
-}
-
-void smp_drain_local_pages(void *arg)
-{
-       drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __drain_pages(smp_processor_id());
-       local_irq_restore(flags);
-
-       smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
 /*
  * Free a 0-order page
  */
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void free_hot_cold_page(struct page *page, int cold)
 {
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
@@ -991,10 +990,13 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
        arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
-       pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+       pcp = &zone_pcp(zone, get_cpu())->pcp;
        local_irq_save(flags);
        __count_vm_event(PGFREE);
-       list_add(&page->lru, &pcp->list);
+       if (cold)
+               list_add_tail(&page->lru, &pcp->list);
+       else
+               list_add(&page->lru, &pcp->list);
        set_page_private(page, get_pageblock_migratetype(page));
        pcp->count++;
        if (pcp->count >= pcp->high) {
@@ -1005,12 +1007,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
        put_cpu();
 }
 
-void fastcall free_hot_page(struct page *page)
+void free_hot_page(struct page *page)
 {
        free_hot_cold_page(page, 0);
 }
        
-void fastcall free_cold_page(struct page *page)
+void free_cold_page(struct page *page)
 {
        free_hot_cold_page(page, 1);
 }
@@ -1052,7 +1054,7 @@ again:
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
 
-               pcp = &zone_pcp(zone, cpu)->pcp[cold];
+               pcp = &zone_pcp(zone, cpu)->pcp;
                local_irq_save(flags);
                if (!pcp->count) {
                        pcp->count = rmqueue_bulk(zone, 0,
@@ -1062,9 +1064,15 @@ again:
                }
 
                /* Find a page of the appropriate migrate type */
-               list_for_each_entry(page, &pcp->list, lru)
-                       if (page_private(page) == migratetype)
-                               break;
+               if (cold) {
+                       list_for_each_entry_reverse(page, &pcp->list, lru)
+                               if (page_private(page) == migratetype)
+                                       break;
+               } else {
+                       list_for_each_entry(page, &pcp->list, lru)
+                               if (page_private(page) == migratetype)
+                                       break;
+               }
 
                /* Allocate more to the pcp list if necessary */
                if (unlikely(&page->lru == &pcp->list)) {
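
Taken together with the free_hot_cold_page() change above, the single pcp
list is now worked from both ends: hot frees and hot allocations use the
head, cold frees and cold allocations use the tail. A user-space sketch of
that discipline (fake_page is a hypothetical stand-in; not kernel code):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void __list_add(struct list_head *new,
		       struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* Head insertion: the hot end of the queue. */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/* Tail insertion: the cold end of the queue. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

struct fake_page { const char *name; struct list_head lru; };

#define to_page(ptr) \
	((struct fake_page *)((char *)(ptr) - offsetof(struct fake_page, lru)))

int main(void)
{
	struct list_head pcp = { &pcp, &pcp };
	struct fake_page a = { "A" }, b = { "B" }, c = { "C" };

	list_add(&a.lru, &pcp);		/* free A hot  -> head */
	list_add_tail(&b.lru, &pcp);	/* free B cold -> tail */
	list_add(&c.lru, &pcp);		/* free C hot  -> head */

	/* Hot allocations scan from the head and find cache-warm C first;
	 * cold allocations scan from the tail and find B first. */
	printf("hot pick: %s, cold pick: %s\n",
	       to_page(pcp.next)->name, to_page(pcp.prev)->name);
	return 0;
}
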
@@ -1559,7 +1567,7 @@ nofail_alloc:
        cond_resched();
 
        if (order != 0)
-               drain_all_local_pages();
+               drain_all_pages();
 
        if (likely(did_some_progress)) {
                page = get_page_from_freelist(gfp_mask, order,
@@ -1633,7 +1641,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
  * Common helper functions.
  */
-fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
        struct page * page;
        page = alloc_pages(gfp_mask, order);
@@ -1644,7 +1652,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 
 EXPORT_SYMBOL(__get_free_pages);
 
-fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
        struct page * page;
 
@@ -1670,7 +1678,7 @@ void __pagevec_free(struct pagevec *pvec)
                free_hot_cold_page(pvec->pages[i], pvec->cold);
 }
 
-fastcall void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
                if (order == 0)
@@ -1682,7 +1690,7 @@ fastcall void __free_pages(struct page *page, unsigned int order)
 
 EXPORT_SYMBOL(__free_pages);
 
-fastcall void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr, unsigned int order)
 {
        if (addr != 0) {
                VM_BUG_ON(!virt_addr_valid((void *)addr));
@@ -1791,12 +1799,9 @@ void show_free_areas(void)
 
                        pageset = zone_pcp(zone, cpu);
 
-                       printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d   "
-                              "Cold: hi:%5d, btch:%4d usd:%4d\n",
-                              cpu, pageset->pcp[0].high,
-                              pageset->pcp[0].batch, pageset->pcp[0].count,
-                              pageset->pcp[1].high, pageset->pcp[1].batch,
-                              pageset->pcp[1].count);
+                       printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
+                              cpu, pageset->pcp.high,
+                              pageset->pcp.batch, pageset->pcp.count);
                }
        }
 
@@ -1869,6 +1874,8 @@ void show_free_areas(void)
                printk("= %lukB\n", K(total));
        }
 
+       printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
+
        show_swap_cache_info();
 }
 
@@ -2541,8 +2548,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        }
 }
 
-static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
-                               struct zone *zone, unsigned long size)
+static void __meminit zone_init_free_lists(struct zone *zone)
 {
        int order, t;
        for_each_migratetype_order(order, t) {
@@ -2556,7 +2562,7 @@ static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __devinit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
 {
        int batch;
 
@@ -2594,17 +2600,11 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 
        memset(p, 0, sizeof(*p));
 
-       pcp = &p->pcp[0];               /* hot */
+       pcp = &p->pcp;
        pcp->count = 0;
        pcp->high = 6 * batch;
        pcp->batch = max(1UL, 1 * batch);
        INIT_LIST_HEAD(&pcp->list);
-
-       pcp = &p->pcp[1];               /* cold*/
-       pcp->count = 0;
-       pcp->high = 2 * batch;
-       pcp->batch = max(1UL, batch/2);
-       INIT_LIST_HEAD(&pcp->list);
 }
 
 /*
@@ -2617,7 +2617,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
 {
        struct per_cpu_pages *pcp;
 
-       pcp = &p->pcp[0]; /* hot list */
+       pcp = &p->pcp;
        pcp->high = high;
        pcp->batch = max(1UL, high/4);
        if ((high/4) > (PAGE_SHIFT * 8))
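
For a sense of scale (illustrative numbers; zone_batchsize() itself is not
shown in this diff): on a large zone with 4 KiB pages the computed batch is
typically 31, so the merged list runs with batch = 31 and
high = 6 * 31 = 186 pages, i.e. up to ~744 KiB can sit on one CPU's list
per zone before free_hot_cold_page() drains it. The cold queue's separate
high = 2 * batch watermark disappears along with the second list, and
setup_pagelist_highmark() above now retunes the one remaining list rather
than just the hot one.
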
@@ -2821,7 +2821,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
        memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
 
-       zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+       zone_init_free_lists(zone);
 
        return 0;
 }
@@ -3266,6 +3266,16 @@ static void inline setup_usemap(struct pglist_data *pgdat,
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+/* Return a sensible default order for the pageblock size. */
+static inline int pageblock_default_order(void)
+{
+       if (HPAGE_SHIFT > PAGE_SHIFT)
+               return HUGETLB_PAGE_ORDER;
+
+       return MAX_ORDER-1;
+}
+
 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
 static inline void __init set_pageblock_order(unsigned int order)
 {
@@ -3281,7 +3291,16 @@ static inline void __init set_pageblock_order(unsigned int order)
 }
 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
 
-/* Defined this way to avoid accidently referencing HUGETLB_PAGE_ORDER */
+/*
+ * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
+ * and pageblock_default_order() are unused as pageblock_order is set
+ * at compile-time. See include/linux/pageblock-flags.h for the values of
+ * pageblock_order based on the kernel config.
+ */
+static inline int pageblock_default_order(unsigned int order)
+{
+       return MAX_ORDER-1;
+}
 #define set_pageblock_order(x) do {} while (0)
 
 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -3366,7 +3385,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                if (!size)
                        continue;
 
-               set_pageblock_order(HUGETLB_PAGE_ORDER);
+               set_pageblock_order(pageblock_default_order());
                setup_usemap(pgdat, zone, size);
                ret = init_currently_empty_zone(zone, zone_start_pfn,
                                                size, MEMMAP_EARLY);
@@ -3409,7 +3428,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
                mem_map = NODE_DATA(0)->node_mem_map;
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
-                       mem_map -= pgdat->node_start_pfn;
+                       mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
        }
 #endif
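
The ARCH_PFN_OFFSET term matters because FLATMEM defines pfn_to_page(pfn)
as (mem_map + (pfn - ARCH_PFN_OFFSET)): the global mem_map alias must be
biased by node_start_pfn - ARCH_PFN_OFFSET, not by node_start_pfn alone,
or every lookup lands ARCH_PFN_OFFSET pages too low. A user-space sketch
of the arithmetic (all values hypothetical):

#include <stdio.h>

#define ARCH_PFN_OFFSET	0x60000UL	/* first pfn backed by RAM */

struct page { int dummy; };

int main(void)
{
	static struct page node_mem_map[16];	/* node 0's memmap */
	unsigned long node_start_pfn = 0x60004;	/* node starts 4 pfns in */
	unsigned long pfn = 0x60006;		/* third page of the node */
	struct page *mem_map, *page;

	/* The patched adjustment; the old 'mem_map -= node_start_pfn'
	 * over-shifted by ARCH_PFN_OFFSET whenever the offset is non-zero. */
	mem_map = node_mem_map - (node_start_pfn - ARCH_PFN_OFFSET);

	/* FLATMEM's pfn_to_page(): */
	page = mem_map + (pfn - ARCH_PFN_OFFSET);

	printf("pfn 0x%lx -> node_mem_map[%ld]\n",
	       pfn, (long)(page - node_mem_map));	/* prints index 2 */
	return 0;
}
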
@@ -3949,10 +3968,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
        int cpu = (unsigned long)hcpu;
 
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-               local_irq_disable();
-               __drain_pages(cpu);
+               drain_pages(cpu);
+
+               /*
+                * Spill the event counters of the dead processor
+                * into the current processor's event counters.
+                * This artificially elevates the count of the current
+                * processor.
+                */
                vm_events_fold_cpu(cpu);
-               local_irq_enable();
+
+               /*
+                * Zero the differential counters of the dead processor
+                * so that the vm statistics are consistent.
+                *
+                * This is only okay since the processor is dead and cannot
+                * race with what we are doing.
+                */
                refresh_cpu_vm_stats(cpu);
        }
        return NOTIFY_OK;
@@ -4451,7 +4483,7 @@ int set_migratetype_isolate(struct page *page)
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
-               drain_all_local_pages();
+               drain_all_pages();
        return ret;
 }