diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7224a4f071067c3d60a017d949daa2e97f0e8a5d..cb1b3a7ecdfcc5030ef0547f3544d697f3b6a30e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
 static unsigned long surplus_huge_pages;
+static unsigned long nr_overcommit_huge_pages;
 unsigned long max_huge_pages;
+unsigned long sysctl_overcommit_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
 static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
-unsigned long nr_overcommit_huge_pages;
 static int hugetlb_next_nid;
 
 /*
@@ -418,9 +419,14 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
        if (free_huge_pages > resv_huge_pages)
                page = dequeue_huge_page(vma, addr);
        spin_unlock(&hugetlb_lock);
-       if (!page)
+       if (!page) {
                page = alloc_buddy_huge_page(vma, addr);
-       return page ? page : ERR_PTR(-VM_FAULT_OOM);
+               if (!page) {
+                       hugetlb_put_quota(vma->vm_file->f_mapping, 1);
+                       return ERR_PTR(-VM_FAULT_OOM);
+               }
+       }
+       return page;
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
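This hunk fixes a quota leak in the private-mapping allocation path. The quota charge is taken against the file's mapping earlier in this function (above the hunk context; the new hugetlb_put_quota() call is what undoes it), so when both the reserved-pool dequeue and the alloc_buddy_huge_page() fallback fail, the charge has to be returned before reporting VM_FAULT_OOM. The old code simply returned ERR_PTR(-VM_FAULT_OOM) and leaked one unit of quota per failed fault. A condensed sketch of the charge/unwind pairing (try_dequeue() and try_buddy() are hypothetical stand-ins for the two allocation attempts):

	static struct page *alloc_private_sketch(struct address_space *mapping)
	{
		struct page *page;

		if (hugetlb_get_quota(mapping, 1))	/* charge up front */
			return ERR_PTR(-ENOSPC);

		page = try_dequeue();			/* reserved pool first */
		if (!page)
			page = try_buddy();		/* then the buddy allocator */
		if (!page) {
			hugetlb_put_quota(mapping, 1);	/* undo the charge on failure */
			return ERR_PTR(-ENOMEM);
		}
		return page;	/* success: the charge is dropped when the page is freed */
	}
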
@@ -600,6 +606,17 @@ int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
        return 0;
 }
 
+int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+                       struct file *file, void __user *buffer,
+                       size_t *length, loff_t *ppos)
+{
+       proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+       spin_lock(&hugetlb_lock);
+       nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
+       spin_unlock(&hugetlb_lock);
+       return 0;
+}
+
 #endif /* CONFIG_SYSCTL */
 
 int hugetlb_report_meminfo(char *buf)
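The new handler pairs with the first hunk, where nr_overcommit_huge_pages became static and a separate sysctl_overcommit_huge_pages shadow variable was exported: proc_doulongvec_minmax() parses the user's write into the shadow variable, and the handler then copies it into the real counter under hugetlb_lock, so all updates to nr_overcommit_huge_pages happen under the same lock as the rest of the pool accounting. Wiring it up takes a ctl_table entry whose .data points at the shadow variable and whose .proc_handler is this function; roughly (reconstructed from memory of this era's kernel/sysctl.c, so treat the exact fields as an approximation):

	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "nr_overcommit_hugepages",
		.data		= &sysctl_overcommit_huge_pages,
		.maxlen		= sizeof(sysctl_overcommit_huge_pages),
		.mode		= 0644,
		.proc_handler	= &hugetlb_overcommit_handler,
	},

A write such as `echo 64 > /proc/sys/vm/nr_overcommit_hugepages` then flows through this handler rather than through plain proc_doulongvec_minmax().
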
@@ -694,6 +711,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
+
+               /* If the pagetables are shared don't copy or take references */
+               if (dst_pte == src_pte)
+                       continue;
+
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
                if (!pte_none(*src_pte)) {
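On configurations with hugetlb page-table sharing (pmd sharing on x86), huge_pte_alloc() in the child can return a pointer to the very same page-table entry the parent uses, in which case dst_pte == src_pte: there is nothing to copy, and taking page references for it would double-count mappings that exist only once through the shared table. The new guard skips such entries during fork. A self-contained userspace illustration of why aliased slots must be skipped (all names hypothetical; compiles with any C99 compiler):

	#include <stdio.h>

	typedef struct { int mapcount; } page_t;
	typedef page_t *slot_t;		/* stand-in for a page-table entry */

	/* dst[i] and src[i] may point at the SAME slot when tables are
	 * shared; copying a slot onto itself would bump the mapcount for
	 * a mapping that exists only once. */
	static void copy_range(slot_t **dst, slot_t **src, int n)
	{
		for (int i = 0; i < n; i++) {
			if (dst[i] == src[i])	/* shared slot: skip */
				continue;
			*dst[i] = *src[i];	/* install the entry */
			if (*src[i])
				(*src[i])->mapcount++;
		}
	}

	int main(void)
	{
		page_t page = { .mapcount = 1 };
		slot_t shared_slot = &page;	/* one entry both tables use */
		slot_t child_slot = NULL;
		slot_t *src[2] = { &shared_slot, &shared_slot };
		slot_t *dst[2] = { &shared_slot, &child_slot };

		copy_range(dst, src, 2);
		printf("mapcount = %d\n", page.mapcount);	/* 2, not 3 */
		return 0;
	}
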
@@ -803,6 +825,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 
        spin_unlock(&mm->page_table_lock);
        copy_huge_page(new_page, old_page, address, vma);
+       __SetPageUptodate(new_page);
        spin_lock(&mm->page_table_lock);
 
        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ -848,6 +871,7 @@ retry:
                        goto out;
                }
                clear_huge_page(page, address);
+               __SetPageUptodate(page);
 
                if (vma->vm_flags & VM_SHARED) {
                        int err;
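Both __SetPageUptodate() additions (here and in hugetlb_cow() above) come from the PageUptodate data-race fix: a page's contents must be fully written before PG_uptodate can become visible, so the flag is set only after clear_huge_page()/copy_huge_page() and before the page is mapped. The double-underscore variant uses a non-atomic bit set, which is safe here because the page is freshly allocated and not yet visible to any other thread. As introduced in mainline around 2.6.25, the helper looks roughly like this (quoted from memory; s390 gets a special-cased SetPageUptodate in the same change):

	static inline void __SetPageUptodate(struct page *page)
	{
		smp_wmb();	/* order the page contents before PG_uptodate */
		__set_bit(PG_uptodate, &(page)->flags);
	}
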
@@ -1206,8 +1230,10 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
        if (hugetlb_get_quota(inode->i_mapping, chg))
                return -ENOSPC;
        ret = hugetlb_acct_memory(chg);
-       if (ret < 0)
+       if (ret < 0) {
+               hugetlb_put_quota(inode->i_mapping, chg);
                return ret;
+       }
        region_add(&inode->i_mapping->private_list, from, to);
        return 0;
 }
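Same bug class as the alloc_huge_page_private() fix above: hugetlb_get_quota() succeeds, hugetlb_acct_memory() then fails, and the early return used to leave the quota charged with nothing left to release it. The fix releases resources in reverse order of acquisition; in larger kernel functions the same shape is usually written with goto unwind labels, e.g. (illustrative only; take_quota()/account()/commit_region()/release_quota() are hypothetical stand-ins for the hugetlb helpers):

	static int reserve_sketch(long chg)
	{
		int ret;

		if (take_quota(chg))
			return -ENOSPC;		/* nothing acquired yet */

		ret = account(chg);
		if (ret < 0)
			goto out_put_quota;	/* undo in reverse order */

		commit_region(chg);		/* both resources now owned */
		return 0;

	out_put_quota:
		release_quota(chg);
		return ret;
	}
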