diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index eb405565949da5bd2bee78df2ea2b6dbb478c64c..508707704d2cb714968a779e128084d640afc439 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -12,6 +12,7 @@
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
 #include <linux/mempolicy.h>
+#include <linux/cpuset.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -48,7 +49,8 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 
        for (z = zonelist->zones; *z; z++) {
                nid = (*z)->zone_pgdat->node_id;
-               if (!list_empty(&hugepage_freelists[nid]))
+               if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
+                   !list_empty(&hugepage_freelists[nid]))
                        break;
        }
 
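For orientation, here is a hedged reconstruction of the whole dequeue path after this hunk; the lines outside the hunk are inferred from the 2.6.16-era mm/hugetlb.c and may differ in detail. The walk now skips any node the faulting task's cpuset disallows before checking that node's free list:

static struct page *dequeue_huge_page(struct vm_area_struct *vma,
                                unsigned long address)
{
        int nid;
        struct page *page = NULL;
        struct zonelist *zonelist = huge_zonelist(vma, address);
        struct zone **z;

        /* First node that is both cpuset-allowed and has a free huge page */
        for (z = zonelist->zones; *z; z++) {
                nid = (*z)->zone_pgdat->node_id;
                if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
                    !list_empty(&hugepage_freelists[nid]))
                        break;
        }

        if (*z) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
                free_huge_pages--;
                free_huge_pages_node[nid]--;
        }
        return page;
}
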
@@ -83,7 +85,7 @@ void free_huge_page(struct page *page)
        BUG_ON(page_count(page));
 
        INIT_LIST_HEAD(&page->lru);
-       page[1].mapping = NULL;
+       page[1].lru.next = NULL;                        /* reset dtor */
 
        spin_lock(&hugetlb_lock);
        enqueue_huge_page(page);
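The huge page's destructor lives in an otherwise-unused field of the first tail page; this hunk moves it from page[1].mapping to page[1].lru.next, leaving the tail page's ->mapping untouched. A minimal sketch of the convention, with hypothetical helper names (the patch itself open-codes the assignments):

typedef void (*huge_page_dtor_t)(struct page *);

/* Stash the release routine in the first tail page's lru.next */
static inline void set_huge_page_dtor(struct page *page,
                                      huge_page_dtor_t dtor)
{
        page[1].lru.next = (void *)dtor;
}

static inline huge_page_dtor_t get_huge_page_dtor(struct page *page)
{
        return (huge_page_dtor_t)page[1].lru.next;
}
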
@@ -103,9 +105,9 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
        }
        spin_unlock(&hugetlb_lock);
        set_page_count(page, 1);
-       page[1].mapping = (void *)free_huge_page;
+       page[1].lru.next = (void *)free_huge_page;      /* set dtor */
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
-               clear_highpage(&page[i]);
+               clear_user_highpage(&page[i], addr);
        return page;
 }
 
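Taken together, the hunks above leave alloc_huge_page() looking roughly like this (a hedged reconstruction; lines outside the hunk are inferred from the era's source). The destructor pointer is installed at allocation time, and each constituent subpage is now zeroed with clear_user_highpage(), which takes the faulting user virtual address so architectures with virtually indexed caches can avoid aliasing problems:

struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        int i;

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page(vma, addr);
        if (!page) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        }
        spin_unlock(&hugetlb_lock);
        set_page_count(page, 1);
        page[1].lru.next = (void *)free_huge_page;      /* set dtor */
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
                clear_user_highpage(&page[i], addr);
        return page;
}
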
@@ -368,43 +370,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        flush_tlb_range(vma, start, end);
 }
 
-static struct page *find_or_alloc_huge_page(struct vm_area_struct *vma,
-                       unsigned long addr, struct address_space *mapping,
-                       unsigned long idx, int shared)
-{
-       struct page *page;
-       int err;
-
-retry:
-       page = find_lock_page(mapping, idx);
-       if (page)
-               goto out;
-
-       if (hugetlb_get_quota(mapping))
-               goto out;
-       page = alloc_huge_page(vma, addr);
-       if (!page) {
-               hugetlb_put_quota(mapping);
-               goto out;
-       }
-
-       if (shared) {
-               err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
-               if (err) {
-                       put_page(page);
-                       hugetlb_put_quota(mapping);
-                       if (err == -EEXIST)
-                               goto retry;
-                       page = NULL;
-               }
-       } else {
-               /* Caller expects a locked page */
-               lock_page(page);
-       }
-out:
-       return page;
-}
-
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte)
 {
@@ -426,12 +391,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (!new_page) {
                page_cache_release(old_page);
-
-               /* Logically this is OOM, not a SIGBUS, but an OOM
-                * could cause the kernel to go killing other
-                * processes which won't help the hugepage situation
-                * at all (?) */
-               return VM_FAULT_SIGBUS;
+               return VM_FAULT_OOM;
        }
 
        spin_unlock(&mm->page_table_lock);
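
Returning VM_FAULT_OOM rather than VM_FAULT_SIGBUS matters because the arch fault handlers of this era dispatch on the return value; roughly, as a hedged sketch modeled on arch/i386/mm/fault.c of the same period:

        switch (handle_mm_fault(mm, vma, address, write_access)) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                tsk->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                goto do_sigbus;         /* deliver SIGBUS to the task */
        case VM_FAULT_OOM:
                goto out_of_memory;     /* OOM-kill path */
        default:
                BUG();
        }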
@@ -471,12 +431,32 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
-       page = find_or_alloc_huge_page(vma, address, mapping, idx,
-                       vma->vm_flags & VM_SHARED);
-       if (!page)
-               goto out;
+retry:
+       page = find_lock_page(mapping, idx);
+       if (!page) {
+               if (hugetlb_get_quota(mapping))
+                       goto out;
+               page = alloc_huge_page(vma, address);
+               if (!page) {
+                       hugetlb_put_quota(mapping);
+                       ret = VM_FAULT_OOM;
+                       goto out;
+               }
 
-       BUG_ON(!PageLocked(page));
+               if (vma->vm_flags & VM_SHARED) {
+                       int err;
+
+                       err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+                       if (err) {
+                               put_page(page);
+                               hugetlb_put_quota(mapping);
+                               if (err == -EEXIST)
+                                       goto retry;
+                               goto out;
+                       }
+               } else
+                       lock_page(page);
+       }
 
        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;
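
The inlined replacement for find_or_alloc_huge_page() relies on add_to_page_cache() returning -EEXIST when another thread races the insertion: the local page and quota are dropped and the lookup is retried, so exactly one page ends up in the cache for a given index. The shape of the idiom, stripped of hugetlb specifics (all names here are placeholders, not kernel APIs):

struct obj *find_or_insert(struct cache *cache, unsigned long idx)
{
        struct obj *obj;
        int err;

retry:
        obj = cache_lookup(cache, idx);         /* fast path: hit */
        if (obj)
                return obj;

        obj = obj_alloc();                      /* slow path: create */
        if (!obj)
                return NULL;

        err = cache_insert(cache, idx, obj);
        if (err) {
                obj_free(obj);
                if (err == -EEXIST)             /* lost the race; retry */
                        goto retry;
                return NULL;
        }
        return obj;
}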