diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 26d0470eef6ec2c2a3abb706c821cc23437e26c1..4e3128ff73c135ce988cdbb9d75fd58981faa26b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 
 #include "uverbs.h"
 
+#define IB_UMEM_MAX_PAGE_CHUNK                                         \
+       ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /      \
+        ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -        \
+         (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
+
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
        struct ib_umem_chunk *chunk, *tmp;
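The IB_UMEM_MAX_PAGE_CHUNK macro added above computes how many page_list entries fit into one page-sized struct ib_umem_chunk: PAGE_SIZE minus the chunk header, divided by the size of a single array element, which the macro derives by pointer arithmetic on a NULL chunk. A minimal equivalent written with sizeof, assuming page_list is an array of struct scatterlist (illustrative only, not part of this diff):

	/*
	 * Illustrative equivalent of IB_UMEM_MAX_PAGE_CHUNK, assuming the
	 * page_list member is an array of struct scatterlist:
	 */
	#define IB_UMEM_MAX_PAGE_CHUNK_EXAMPLE                               \
		((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /  \
		 sizeof(struct scatterlist))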
@@ -49,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
                ib_dma_unmap_sg(dev, chunk->page_list,
                                chunk->nents, DMA_BIDIRECTIONAL);
                for (i = 0; i < chunk->nents; ++i) {
+                       struct page *page = sg_page(&chunk->page_list[i]);
+
                        if (umem->writable && dirty)
-                               set_page_dirty_lock(chunk->page_list[i].page);
-                       put_page(chunk->page_list[i].page);
+                               set_page_dirty_lock(page);
+                       put_page(page);
                }
 
                kfree(chunk);
@@ -70,6 +78,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 {
        struct ib_umem *umem;
        struct page **page_list;
+       struct vm_area_struct **vma_list;
        struct ib_umem_chunk *chunk;
        unsigned long locked;
        unsigned long lock_limit;
@@ -99,6 +108,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
         */
        umem->writable  = !!(access & ~IB_ACCESS_REMOTE_READ);
 
+       /* We assume the memory is from hugetlb until proved otherwise */
+       umem->hugetlb   = 1;
+
        INIT_LIST_HEAD(&umem->chunk_list);
 
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
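For reference, the writable computation in the context above treats the mapping as read-only only when remote read is the sole requested access; any other access flag means the pinned pages may be written. A few worked cases (illustrative):

	/* Illustrative values of !!(access & ~IB_ACCESS_REMOTE_READ): */
	/* access = IB_ACCESS_REMOTE_READ                             -> writable = 0 */
	/* access = IB_ACCESS_LOCAL_WRITE                             -> writable = 1 */
	/* access = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE    -> writable = 1 */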
@@ -107,6 +119,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                return ERR_PTR(-ENOMEM);
        }
 
+       /*
+        * if we can't alloc the vma_list, it's not so bad;
+        * just assume the memory is not hugetlb memory
+        */
+       vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
+       if (!vma_list)
+               umem->hugetlb = 0;
+
        npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
 
        down_write(&current->mm->mmap_sem);
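If the page for vma_list cannot be allocated, the function does not fail; it simply stops tracking VMAs and conservatively clears the hugetlb flag. When vma_list is available, each pinned page's VMA is tested later in the loop with is_vm_hugetlb_page(). A simplified sketch of that predicate, assuming hugetlb support is compiled in (the real definition in <linux/hugetlb.h> is conditional on CONFIG_HUGETLB_PAGE and evaluates to 0 otherwise):

	/* Simplified sketch: true if the VMA is backed by hugetlbfs */
	static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
	{
		return vma->vm_flags & VM_HUGETLB;
	}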
@@ -126,7 +146,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                ret = get_user_pages(current, current->mm, cur_base,
                                     min_t(int, npages,
                                           PAGE_SIZE / sizeof (struct page *)),
-                                    1, !umem->writable, page_list, NULL);
+                                    1, !umem->writable, page_list, vma_list);
 
                if (ret < 0)
                        goto out;
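Each pass of the loop above pins at most PAGE_SIZE / sizeof(struct page *) pages, because the temporary page_list buffer is itself a single page. A worked example with common (illustrative) values:

	/*
	 * Example (illustrative, 4 KB pages, 64-bit pointers):
	 *   PAGE_SIZE / sizeof(struct page *) = 4096 / 8 = 512
	 * so each get_user_pages() call pins at most 512 pages,
	 * i.e. up to 2 MB of user memory per iteration.
	 */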
@@ -146,10 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                        }
 
                        chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+                       sg_init_table(chunk->page_list, chunk->nents);
                        for (i = 0; i < chunk->nents; ++i) {
-                               chunk->page_list[i].page   = page_list[i + off];
-                               chunk->page_list[i].offset = 0;
-                               chunk->page_list[i].length = PAGE_SIZE;
+                               if (vma_list &&
+                                   !is_vm_hugetlb_page(vma_list[i + off]))
+                                       umem->hugetlb = 0;
+                               sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                        }
 
                        chunk->nmap = ib_dma_map_sg(context->device,
@@ -158,7 +180,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                                                    DMA_BIDIRECTIONAL);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
-                                       put_page(chunk->page_list[i].page);
+                                       put_page(sg_page(&chunk->page_list[i]));
                                kfree(chunk);
 
                                ret = -ENOMEM;
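The hunks above convert chunk setup and the error path from poking page/offset/length fields directly to the scatterlist accessors (sg_init_table(), sg_set_page(), sg_page()), which is what the generic scatterlist code expects once SG chaining is in play. A self-contained sketch of the accessor-based pattern, with a hypothetical fill_sg() helper name:

	#include <linux/mm.h>
	#include <linux/scatterlist.h>

	/* Hypothetical helper, illustrating the accessor-based pattern only */
	static void fill_sg(struct scatterlist *sg, struct page **pages, int n)
	{
		int i;

		sg_init_table(sg, n);		/* zero entries and mark the last one */
		for (i = 0; i < n; ++i)
			sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
	}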
@@ -181,6 +203,8 @@ out:
                current->mm->locked_vm = locked;
 
        up_write(&current->mm->mmap_sem);
+       if (vma_list)
+               free_page((unsigned long) vma_list);
        free_page((unsigned long) page_list);
 
        return ret < 0 ? ERR_PTR(ret) : umem;
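ib_umem_get() keeps its ERR_PTR()-style return, so callers continue to check the result with IS_ERR(); a driver can then consult the new hugetlb flag to decide whether larger pages can back the registration. A hedged caller-side sketch (the mapping helpers are hypothetical names, not from this patch):

	struct ib_umem *umem;

	umem = ib_umem_get(context, start, length, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* hugetlb is set only if every VMA backing the range was hugetlb */
	if (umem->hugetlb)
		map_with_huge_pages(dev, umem);	/* hypothetical helper */
	else
		map_with_4k_pages(dev, umem);	/* hypothetical helper */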