pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - mm/hugetlb.c
hugetlb: factor out prep_new_huge_page
[linux-2.6-omap-h63xx.git] / mm / hugetlb.c
index 65616941a383a3304bbccdd50a1858b112195534..32dff4290c66ede5e8fea935b7242e78b7d11928 100644 (file)
@@ -199,22 +199,11 @@ static long region_count(struct list_head *head, long f, long t)
        return chg;
 }
 
-/*
- * Convert the address within this vma to the page offset within
- * the mapping, in base page units.
- */
-static pgoff_t vma_page_offset(struct vm_area_struct *vma,
-                               unsigned long address)
-{
-       return ((address - vma->vm_start) >> PAGE_SHIFT) +
-                                       (vma->vm_pgoff >> PAGE_SHIFT);
-}
-
 /*
  * Convert the address within this vma to the page offset within
  * the mapping, in pagecache page units; huge pages here.
  */
-static pgoff_t vma_pagecache_offset(struct vm_area_struct *vma,
+static pgoff_t vma_hugecache_offset(struct vm_area_struct *vma,
                                        unsigned long address)
 {
        return ((address - vma->vm_start) >> HPAGE_SHIFT) +
@@ -524,6 +513,16 @@ static int adjust_pool_surplus(int delta)
        return ret;
 }
 
+static void prep_new_huge_page(struct page *page, int nid)
+{
+       set_compound_page_dtor(page, free_huge_page);
+       spin_lock(&hugetlb_lock);
+       nr_huge_pages++;
+       nr_huge_pages_node[nid]++;
+       spin_unlock(&hugetlb_lock);
+       put_page(page); /* free it into the hugepage allocator */
+}
+
 static struct page *alloc_fresh_huge_page_node(int nid)
 {
        struct page *page;
@@ -537,12 +536,7 @@ static struct page *alloc_fresh_huge_page_node(int nid)
                        __free_pages(page, HUGETLB_PAGE_ORDER);
                        return NULL;
                }
-               set_compound_page_dtor(page, free_huge_page);
-               spin_lock(&hugetlb_lock);
-               nr_huge_pages++;
-               nr_huge_pages_node[nid]++;
-               spin_unlock(&hugetlb_lock);
-               put_page(page); /* free it into the hugepage allocator */
+               prep_new_huge_page(page, nid);
        }
 
        return page;
@@ -806,7 +800,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
        struct inode *inode = mapping->host;
 
        if (vma->vm_flags & VM_SHARED) {
-               pgoff_t idx = vma_pagecache_offset(vma, addr);
+               pgoff_t idx = vma_hugecache_offset(vma, addr);
                return region_chg(&inode->i_mapping->private_list,
                                                        idx, idx + 1);
 
@@ -815,7 +809,7 @@ static int vma_needs_reservation(struct vm_area_struct *vma, unsigned long addr)
 
        } else  {
                int err;
-               pgoff_t idx = vma_pagecache_offset(vma, addr);
+               pgoff_t idx = vma_hugecache_offset(vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);
 
                err = region_chg(&reservations->regions, idx, idx + 1);
@@ -831,11 +825,11 @@ static void vma_commit_reservation(struct vm_area_struct *vma,
        struct inode *inode = mapping->host;
 
        if (vma->vm_flags & VM_SHARED) {
-               pgoff_t idx = vma_pagecache_offset(vma, addr);
+               pgoff_t idx = vma_hugecache_offset(vma, addr);
                region_add(&inode->i_mapping->private_list, idx, idx + 1);
 
        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-               pgoff_t idx = vma_pagecache_offset(vma, addr);
+               pgoff_t idx = vma_hugecache_offset(vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);
 
                /* Mark this page used in the map. */
@@ -1153,8 +1147,8 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
        unsigned long end;
 
        if (reservations) {
-               start = vma_pagecache_offset(vma, vma->vm_start);
-               end = vma_pagecache_offset(vma, vma->vm_end);
+               start = vma_hugecache_offset(vma, vma->vm_start);
+               end = vma_hugecache_offset(vma, vma->vm_end);
 
                reserve = (end - start) -
                        region_count(&reservations->regions, start, end);
@@ -1471,7 +1465,7 @@ static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
        pgoff_t idx;
 
        mapping = vma->vm_file->f_mapping;
-       idx = vma_pagecache_offset(vma, address);
+       idx = vma_hugecache_offset(vma, address);
 
        return find_lock_page(mapping, idx);
 }
@@ -1499,7 +1493,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        mapping = vma->vm_file->f_mapping;
-       idx = vma_pagecache_offset(vma, address);
+       idx = vma_hugecache_offset(vma, address);
 
        /*
         * Use page lock to guard against racing truncation