docbook: fix kernel-api source files
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 717fafaa7e02948b3c0c281e4c47ed86e38c7444..dbcdd6bfa63a60e1c87b1fee1aedfac89ddb07bd 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -3,25 +3,26 @@
  *
  * This file is released under the GPLv2.
  *
- * Copyright (C) 2006 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  */
 
 #include "iova.h"
 
 void
-init_iova_domain(struct iova_domain *iovad)
+init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
        spin_lock_init(&iovad->iova_alloc_lock);
        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
-
+       iovad->dma_32bit_pfn = pfn_32bit;
 }
 
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
 {
-       if ((*limit_pfn != DMA_32BIT_PFN) ||
+       if ((*limit_pfn != iovad->dma_32bit_pfn) ||
                (iovad->cached32_node == NULL))
                return rb_last(&iovad->rbroot);
        else {
@@ -37,7 +38,7 @@ static void
 __cached_rbnode_insert_update(struct iova_domain *iovad,
        unsigned long limit_pfn, struct iova *new)
 {
-       if (limit_pfn != DMA_32BIT_PFN)
+       if (limit_pfn != iovad->dma_32bit_pfn)
                return;
        iovad->cached32_node = &new->node;
 }
@@ -57,12 +58,28 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
                iovad->cached32_node = rb_next(&free->node);
 }
 
-static int __alloc_iova_range(struct iova_domain *iovad,
-       unsigned long size, unsigned long limit_pfn, struct iova *new)
+/* Computes the padding size required to make the
+ * start address naturally aligned on its size
+ */
+static int
+iova_get_pad_size(int size, unsigned int limit_pfn)
+{
+       unsigned int pad_size = 0;
+       unsigned int order = ilog2(size);
+
+       if (order)
+               pad_size = (limit_pfn + 1) % (1 << order);
+
+       return pad_size;
+}
+
+static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
+               unsigned long limit_pfn, struct iova *new, bool size_aligned)
 {
        struct rb_node *curr = NULL;
        unsigned long flags;
        unsigned long saved_pfn;
+       unsigned int pad_size = 0;
 
        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -72,22 +89,32 @@ static int __alloc_iova_range(struct iova_domain *iovad,
                struct iova *curr_iova = container_of(curr, struct iova, node);
                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
-               if (limit_pfn < curr_iova->pfn_hi)
+               else if (limit_pfn < curr_iova->pfn_hi)
                        goto adjust_limit_pfn;
-               if ((curr_iova->pfn_hi + size) <= limit_pfn)
-                       break;  /* found a free slot */
+               else {
+                       if (size_aligned)
+                               pad_size = iova_get_pad_size(size, limit_pfn);
+                       if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
+                               break;  /* found a free slot */
+               }
 adjust_limit_pfn:
                limit_pfn = curr_iova->pfn_lo - 1;
 move_left:
                curr = rb_prev(curr);
        }
 
-       if ((!curr) && !(IOVA_START_PFN + size <= limit_pfn)) {
-               spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-               return -ENOMEM;
+       if (!curr) {
+               if (size_aligned)
+                       pad_size = iova_get_pad_size(size, limit_pfn);
+               if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+                       spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+                       return -ENOMEM;
+               }
        }
-       new->pfn_hi = limit_pfn;
-       new->pfn_lo = limit_pfn - size + 1;
+
+       /* pfn_lo will point to a size-aligned address if size_aligned is set */
+       new->pfn_lo = limit_pfn - (size + pad_size) + 1;
+       new->pfn_hi = new->pfn_lo + size - 1;
 
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return 0;
@@ -119,12 +146,16 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
  * @iovad - iova domain in question
  * @size - size of page frames to allocate
  * @limit_pfn - max limit address
+ * @size_aligned - set if a size-aligned address range is required
  * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN.
+ * looking from limit_pfn instead of from IOVA_START_PFN. If the size_aligned
+ * flag is set then the allocated address iova->pfn_lo will be naturally
+ * aligned on roundup_power_of_two(size).
  */
 struct iova *
 alloc_iova(struct iova_domain *iovad, unsigned long size,
-       unsigned long limit_pfn)
+       unsigned long limit_pfn,
+       bool size_aligned)
 {
        unsigned long flags;
        struct iova *new_iova;
@@ -134,8 +165,15 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
        if (!new_iova)
                return NULL;
 
+       /* If size_aligned is set then round the size
+        * up to the next power of two.
+        */
+       if (size_aligned)
+               size = __roundup_pow_of_two(size);
+
        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-       ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova);
+       ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
+                       size_aligned);
 
        if (ret) {
                spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
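From a caller's point of view the visible change is the extra size_aligned argument. A hedged sketch of how a mapping path might now request a naturally aligned range (the iovad, nrpages and dma_32bit_pfn names here are illustrative, not taken from this patch):

	struct iova *iova;

	/* Request a size-aligned range: alloc_iova() rounds nrpages up to
	 * a power of two and returns an iova whose pfn_lo is aligned on
	 * that rounded size; NULL means no suitable range was found. */
	iova = alloc_iova(iovad, nrpages, dma_32bit_pfn, true);
	if (!iova)
		return NULL;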