diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1838c15ca4fd8b3eb0ee70739ea63f765fb4689e..4fe7e3aa02e2f38773105620d2640d779f6381ca 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -700,7 +700,7 @@ int migrate_page_remove_references(struct page *newpage,
         * the page.
         */
        if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
-               return 1;
+               return -EAGAIN;
 
        /*
         * Establish swap ptes for anonymous pages or destroy pte
@@ -721,13 +721,15 @@ int migrate_page_remove_references(struct page *newpage,
         * If the page was not migrated then the PageSwapCache bit
         * is still set and the operation may continue.
         */
-       try_to_unmap(page, 1);
+       if (try_to_unmap(page, 1) == SWAP_FAIL)
+               /* A vma has VM_LOCKED set -> Permanent failure */
+               return -EPERM;
 
        /*
         * Give up if we were unable to remove all mappings.
         */
        if (page_mapcount(page))
-               return 1;
+               return -EAGAIN;
 
        write_lock_irq(&mapping->tree_lock);
 
@@ -738,7 +740,7 @@ int migrate_page_remove_references(struct page *newpage,
        if (!page_mapping(page) || page_count(page) != nr_refs ||
                        *radix_pointer != page) {
                write_unlock_irq(&mapping->tree_lock);
-               return 1;
+               return -EAGAIN;
        }
 
        /*
@@ -813,10 +815,14 @@ EXPORT_SYMBOL(migrate_page_copy);
  */
 int migrate_page(struct page *newpage, struct page *page)
 {
+       int rc;
+
        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 
-       if (migrate_page_remove_references(newpage, page, 2))
-               return -EAGAIN;
+       rc = migrate_page_remove_references(newpage, page, 2);
+
+       if (rc)
+               return rc;
 
        migrate_page_copy(newpage, page);
 
@@ -1883,7 +1889,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
        if (!(gfp_mask & __GFP_WAIT) ||
                zone->all_unreclaimable ||
-               atomic_read(&zone->reclaim_in_progress) > 0)
+               atomic_read(&zone->reclaim_in_progress) > 0 ||
+               (p->flags & PF_MEMALLOC))
                        return 0;
 
        node_id = zone->zone_pgdat->node_id;
@@ -1908,7 +1915,12 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
        cond_resched();
-       p->flags |= PF_MEMALLOC;
+       /*
+        * We need to be able to allocate from the reserves for RECLAIM_SWAP
+        * and we also need to be able to write out pages for RECLAIM_WRITE
+        * and RECLAIM_SWAP.
+        */
+       p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
@@ -1932,11 +1944,10 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * a long time.
                 */
                shrink_slab(sc.nr_scanned, gfp_mask, order);
-               sc.nr_reclaimed = 1;    /* Avoid getting the off node timeout */
        }
 
        p->reclaim_state = NULL;
-       current->flags &= ~PF_MEMALLOC;
+       current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
 
        if (sc.nr_reclaimed == 0)
                zone->last_unsuccessful_zone_reclaim = jiffies;