diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2f54e29dc7a64b3771e6fec854d7f4699b355e17..fe78f7d250991d120adcbf6c93c50b0ee38078c7 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
+#include <linux/dma-attrs.h>
 
 #include "uverbs.h"
 
@@ -55,9 +56,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
                ib_dma_unmap_sg(dev, chunk->page_list,
                                chunk->nents, DMA_BIDIRECTIONAL);
                for (i = 0; i < chunk->nents; ++i) {
+                       struct page *page = sg_page(&chunk->page_list[i]);
+
                        if (umem->writable && dirty)
-                               set_page_dirty_lock(chunk->page_list[i].page);
-                       put_page(chunk->page_list[i].page);
+                               set_page_dirty_lock(page);
+                       put_page(page);
                }
 
                kfree(chunk);
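
The hunk above stops reaching into struct scatterlist directly (the old chunk->page_list[i].page, .offset and .length fields) and uses the sg_page()/sg_set_page() accessors instead, as the chained-scatterlist API requires. A minimal sketch of that accessor pattern outside the context of this patch; the helper names and the dirty handling here are illustrative only:

	#include <linux/mm.h>
	#include <linux/scatterlist.h>

	/* Fill a scatterlist from an array of pinned pages. */
	static void sg_fill_from_pages(struct scatterlist *sgl,
				       struct page **pages, int nents)
	{
		int i;

		sg_init_table(sgl, nents);	/* zero the entries, mark the last one */
		for (i = 0; i < nents; ++i)
			/* page, length, offset: no direct field access */
			sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);
	}

	/* Release the pages again, dirtying them if they were written. */
	static void sg_put_pages(struct scatterlist *sgl, int nents, int dirty)
	{
		int i;

		for (i = 0; i < nents; ++i) {
			struct page *page = sg_page(&sgl[i]);

			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
		}
	}
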
@@ -70,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
+ * @dmasync: flush in-flight DMA when the memory region is written
  */
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-                           size_t size, int access)
+                           size_t size, int access, int dmasync)
 {
        struct ib_umem *umem;
        struct page **page_list;
@@ -85,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        int ret;
        int off;
        int i;
+       DEFINE_DMA_ATTRS(attrs);
+
+       if (dmasync)
+               dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
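
With the extra dmasync argument, ib_umem_get() can be asked to map the region with DMA_ATTR_WRITE_BARRIER so that in-flight DMA is flushed when the region is written. A hedged sketch of a caller; the function and variable names are hypothetical, only the ib_umem_get() signature comes from this patch:

	#include <rdma/ib_umem.h>
	#include <rdma/ib_verbs.h>

	static struct ib_umem *pin_user_region(struct ib_ucontext *context,
					       unsigned long start, size_t length,
					       int access, int needs_write_barrier)
	{
		/*
		 * dmasync != 0 requests DMA_ATTR_WRITE_BARRIER on the mapping,
		 * useful for buffers the device writes and the CPU polls.
		 */
		return ib_umem_get(context, start, length, access,
				   needs_write_barrier ? 1 : 0);
	}
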
@@ -164,22 +172,22 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                        }
 
                        chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+                       sg_init_table(chunk->page_list, chunk->nents);
                        for (i = 0; i < chunk->nents; ++i) {
                                if (vma_list &&
                                    !is_vm_hugetlb_page(vma_list[i + off]))
                                        umem->hugetlb = 0;
-                               chunk->page_list[i].page   = page_list[i + off];
-                               chunk->page_list[i].offset = 0;
-                               chunk->page_list[i].length = PAGE_SIZE;
+                               sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                        }
 
-                       chunk->nmap = ib_dma_map_sg(context->device,
-                                                   &chunk->page_list[0],
-                                                   chunk->nents,
-                                                   DMA_BIDIRECTIONAL);
+                       chunk->nmap = ib_dma_map_sg_attrs(context->device,
+                                                         &chunk->page_list[0],
+                                                         chunk->nents,
+                                                         DMA_BIDIRECTIONAL,
+                                                         &attrs);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
-                                       put_page(chunk->page_list[i].page);
+                                       put_page(sg_page(&chunk->page_list[i]));
                                kfree(chunk);
 
                                ret = -ENOMEM;
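
The mapping now goes through ib_dma_map_sg_attrs() instead of ib_dma_map_sg(), so the dma_attrs built above actually reach the DMA layer. A rough sketch of what such a wrapper looks like; this is an approximation for illustration, not code quoted from ib_verbs.h:

	#include <linux/dma-attrs.h>
	#include <linux/dma-mapping.h>
	#include <rdma/ib_verbs.h>

	static inline int example_dma_map_sg_attrs(struct ib_device *dev,
						   struct scatterlist *sg, int nents,
						   enum dma_data_direction direction,
						   struct dma_attrs *attrs)
	{
		/*
		 * Forward the scatterlist and attributes to the generic DMA
		 * API; architectures without attribute support ignore them
		 * and behave like plain dma_map_sg().
		 */
		return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
	}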