diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 4e01df99681ac79c1bbd0842d57ead2933af11d3..301c68fab03befa1c10d60d8fe6d9d65fcd9583d 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
- * Copyright (C) Ashok Raj <ashok.raj@intel.com>
- * Copyright (C) Shaohua Li <shaohua.li@intel.com>
- * Copyright (C) Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Ashok Raj <ashok.raj@intel.com>
+ * Author: Shaohua Li <shaohua.li@intel.com>
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  */
 
 #include <linux/init.h>
 #include <linux/bitmap.h>
+#include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
@@ -30,6 +32,7 @@
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
+#include <linux/timer.h>
 #include "iova.h"
 #include "intel-iommu.h"
 #include <asm/proto.h> /* force_iommu in this header in x86-64 */
 
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
 
+
+static void flush_unmaps_timeout(unsigned long data);
+
+DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+
+static struct intel_iommu *g_iommus;
+
+#define HIGH_WATER_MARK 250
+struct deferred_flush_tables {
+       int next;
+       struct iova *iova[HIGH_WATER_MARK];
+       struct dmar_domain *domain[HIGH_WATER_MARK];
+};
+
+static struct deferred_flush_tables *deferred_flush;
+
+/* number of registered IOMMUs */
+static int g_num_of_iommus;
+
+static DEFINE_SPINLOCK(async_umap_flush_lock);
+static LIST_HEAD(unmaps_to_do);
+
+static int timer_on;
+static long list_size;
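
These globals implement a per-IOMMU deferred-unmap queue. A rough
footprint estimate (my arithmetic, assuming 8-byte pointers on a
64-bit build):

    /* One deferred_flush_tables entry:
     *   iova[]   : 250 * 8 bytes = 2000 bytes
     *   domain[] : 250 * 8 bytes = 2000 bytes
     *   next     : 4 bytes (+ padding)
     * => roughly 4 KB per IOMMU, allocated once in init_dmars(). */
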
+
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
 static int dmar_disabled;
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
+static int intel_iommu_strict;
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
@@ -73,9 +102,13 @@ static int __init intel_iommu_setup(char *str)
                        printk(KERN_INFO
                                "Intel-IOMMU: disable GFX device mapping\n");
                } else if (!strncmp(str, "forcedac", 8)) {
-                       printk (KERN_INFO
+                       printk(KERN_INFO
                                "Intel-IOMMU: Forcing DAC for PCI devices\n");
                        dmar_forcedac = 1;
+               } else if (!strncmp(str, "strict", 6)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: disable batched IOTLB flush\n");
+                       intel_iommu_strict = 1;
                }
 
                str += strcspn(str, ",");
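
Since the parser walks a comma-separated option string, the new flag
composes with the existing ones on the kernel command line, e.g. (a
hypothetical combination):

    intel_iommu=forcedac,strict
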
@@ -692,6 +725,23 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
                DMA_TLB_PSI_FLUSH, non_present_entry_flush);
 }
 
+static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
+{
+       u32 pmen;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iommu->register_lock, flags);
+       pmen = readl(iommu->reg + DMAR_PMEN_REG);
+       pmen &= ~DMA_PMEN_EPM;
+       writel(pmen, iommu->reg + DMAR_PMEN_REG);
+
+       /* wait for the protected region status bit to clear */
+       IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
+               readl, !(pmen & DMA_PMEN_PRS), pmen);
+
+       spin_unlock_irqrestore(&iommu->register_lock, flags);
+}
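
IOMMU_WAIT_OP() is supplied by intel-iommu.h; behaviorally it is a
register-polling loop along these lines (a sketch of the macro's
effect, not its verbatim definition):

    /* Sketch only: poll DMAR_PMEN_REG until DMA_PMEN_PRS clears.
     * The real macro also gives up (panics) if the hardware never
     * responds within a timeout. */
    do {
            pmen = readl(iommu->reg + DMAR_PMEN_REG);
            if (!(pmen & DMA_PMEN_PRS))
                    break;
            cpu_relax();
    } while (1);
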
+
 static int iommu_enable_translation(struct intel_iommu *iommu)
 {
        u32 sts;
@@ -728,7 +778,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
 
 /* iommu interrupt handling. Most of it is MSI-like. */
 
-static char *fault_reason_strings[] =
+static const char *fault_reason_strings[] =
 {
        "Software",
        "Present bit in root entry is clear",
@@ -743,14 +793,13 @@ static char *fault_reason_strings[] =
        "non-zero reserved fields in RTP",
        "non-zero reserved fields in CTP",
        "non-zero reserved fields in PTE",
-       "Unknown"
 };
-#define MAX_FAULT_REASON_IDX   ARRAY_SIZE(fault_reason_strings) - 1
+#define MAX_FAULT_REASON_IDX   (ARRAY_SIZE(fault_reason_strings) - 1)
 
-char *dmar_get_fault_reason(u8 fault_reason)
+const char *dmar_get_fault_reason(u8 fault_reason)
 {
-       if (fault_reason >= MAX_FAULT_REASON_IDX)
-               return fault_reason_strings[MAX_FAULT_REASON_IDX - 1];
+       if (fault_reason > MAX_FAULT_REASON_IDX)
+               return "Unknown";
        else
                return fault_reason_strings[fault_reason];
 }
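
The rewritten bounds check also fixes an off-by-one: with "Unknown"
dropped from the table, out-of-range reason codes now get a distinct
string instead of aliasing the last real entry:

    /* Behavior after the change (index values illustrative):
     *   fault_reason <= MAX_FAULT_REASON_IDX -> fault_reason_strings[]
     *   fault_reason >  MAX_FAULT_REASON_IDX -> "Unknown"
     * The old ">=" test returned the last in-table string for
     * out-of-range codes, so the "Unknown" sentinel was unreachable. */
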
@@ -808,7 +857,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
 static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
                u8 fault_reason, u16 source_id, u64 addr)
 {
-       char *reason;
+       const char *reason;
 
        reason = dmar_get_fault_reason(fault_reason);
 
@@ -949,17 +998,13 @@ static int iommu_init_domains(struct intel_iommu *iommu)
                set_bit(0, iommu->domain_ids);
        return 0;
 }
-
-static struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
+static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
+                                       struct dmar_drhd_unit *drhd)
 {
-       struct intel_iommu *iommu;
        int ret;
        int map_size;
        u32 ver;
 
-       iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-       if (!iommu)
-               return NULL;
        iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
        if (!iommu->reg) {
                printk(KERN_ERR "IOMMU: can't map the region\n");
@@ -1080,6 +1125,8 @@ static void iommu_free_domain(struct dmar_domain *domain)
 }
 
 static struct iova_domain reserved_iova_list;
+static struct lock_class_key reserved_alloc_key;
+static struct lock_class_key reserved_rbtree_key;
 
 static void dmar_init_reserved_ranges(void)
 {
@@ -1088,7 +1135,12 @@ static void dmar_init_reserved_ranges(void)
        int i;
        u64 addr, size;
 
-       init_iova_domain(&reserved_iova_list);
+       init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
+
+       lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
+               &reserved_alloc_key);
+       lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
+               &reserved_rbtree_key);
 
        /* IOAPIC ranges shouldn't be accessed by DMA */
        iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
@@ -1142,7 +1194,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        int adjust_width, agaw;
        unsigned long sagaw;
 
-       init_iova_domain(&domain->iovad);
+       init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
        spin_lock_init(&domain->mapping_lock);
 
        domain_reserve_special_ranges(domain);
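
The extra init_iova_domain() argument is the allocator's 32-bit
boundary (assumed semantics, based on how this patch uses it):

    /* DMA_32BIT_PFN: default upper page-frame bound for IOVA
     * allocation, keeping DMA addresses 32-bit addressable unless a
     * caller asks for a higher limit (cf. dmar_forcedac above). */
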
@@ -1380,7 +1432,7 @@ static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
        int index;
 
        while (dev) {
-               for (index = 0; index < cnt; index ++)
+               for (index = 0; index < cnt; index++)
                        if (dev == devices[index])
                                return 1;
 
@@ -1645,7 +1697,7 @@ int __init init_dmars(void)
        struct dmar_rmrr_unit *rmrr;
        struct pci_dev *pdev;
        struct intel_iommu *iommu;
-       int ret, unit = 0;
+       int i, ret, unit = 0;
 
        /*
         * for each drhd
@@ -1656,7 +1708,34 @@ int __init init_dmars(void)
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
-               iommu = alloc_iommu(drhd);
+               g_num_of_iommus++;
+               /*
+                * lock not needed as this is only incremented in the
+                * single-threaded kernel __init code path; all other
+                * access is read-only
+                */
+       }
+
+       g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
+       if (!g_iommus) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       deferred_flush = kzalloc(g_num_of_iommus *
+               sizeof(struct deferred_flush_tables), GFP_KERNEL);
+       if (!deferred_flush) {
+               kfree(g_iommus);
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       i = 0;
+       for_each_drhd_unit(drhd) {
+               if (drhd->ignored)
+                       continue;
+               iommu = alloc_iommu(&g_iommus[i], drhd);
+               i++;
                if (!iommu) {
                        ret = -ENOMEM;
                        goto error;
@@ -1689,7 +1768,6 @@ int __init init_dmars(void)
         * endfor
         */
        for_each_rmrr_units(rmrr) {
-               int i;
                for (i = 0; i < rmrr->devices_cnt; i++) {
                        pdev = rmrr->devices[i];
                        /* some BIOS lists non-existent devices in DMAR table */
@@ -1730,6 +1808,8 @@ int __init init_dmars(void)
                iommu_flush_context_global(iommu, 0);
                iommu_flush_iotlb_global(iommu, 0);
 
+               iommu_disable_protect_mem_regions(iommu);
+
                ret = iommu_enable_translation(iommu);
                if (ret)
                        goto error;
@@ -1743,6 +1823,7 @@ error:
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
+       kfree(g_iommus);
        return ret;
 }
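
The reworked init path is a count-then-allocate two-pass; in outline:

    /* Shape of the new init_dmars() flow:
     *   pass 1: for_each_drhd_unit()  -> count into g_num_of_iommus
     *   alloc : g_iommus[] and deferred_flush[] sized by that count
     *   pass 2: for_each_drhd_unit()  -> alloc_iommu(&g_iommus[i], drhd)
     */
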
 
@@ -1891,6 +1972,59 @@ error:
        return 0;
 }
 
+static void flush_unmaps(void)
+{
+       int i, j;
+
+       timer_on = 0;
+
+       /* just flush them all */
+       for (i = 0; i < g_num_of_iommus; i++) {
+               if (deferred_flush[i].next) {
+                       iommu_flush_iotlb_global(&g_iommus[i], 0);
+                       for (j = 0; j < deferred_flush[i].next; j++) {
+                               __free_iova(&deferred_flush[i].domain[j]->iovad,
+                                               deferred_flush[i].iova[j]);
+                       }
+                       deferred_flush[i].next = 0;
+               }
+       }
+
+       list_size = 0;
+}
+
+static void flush_unmaps_timeout(unsigned long data)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&async_umap_flush_lock, flags);
+       flush_unmaps();
+       spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+}
+
+static void add_unmap(struct dmar_domain *dom, struct iova *iova)
+{
+       unsigned long flags;
+       int next, iommu_id;
+
+       spin_lock_irqsave(&async_umap_flush_lock, flags);
+       if (list_size == HIGH_WATER_MARK)
+               flush_unmaps();
+
+       iommu_id = dom->iommu - g_iommus;
+       next = deferred_flush[iommu_id].next;
+       deferred_flush[iommu_id].domain[next] = dom;
+       deferred_flush[iommu_id].iova[next] = iova;
+       deferred_flush[iommu_id].next++;
+
+       if (!timer_on) {
+               mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
+               timer_on = 1;
+       }
+       list_size++;
+       spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+}
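
Taken together, add_unmap() and flush_unmaps() implement the batching
policy below; the constants come straight from this patch:

    /* Deferred-unmap policy:
     *   - flush everything once HIGH_WATER_MARK (250) unmaps are
     *     queued, or
     *   - 10 ms after the first queued unmap (the mod_timer above),
     *     whichever comes first.
     * Until then, freed IOVAs may still be live in the IOTLB; booting
     * with intel_iommu=strict restores per-unmap flushing. */
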
+
 static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
        size_t size, int dir)
 {
@@ -1918,13 +2052,19 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
        dma_pte_clear_range(domain, start_addr, start_addr + size);
        /* free page tables */
        dma_pte_free_pagetable(domain, start_addr, start_addr + size);
-
-       if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-                       size >> PAGE_SHIFT_4K, 0))
-               iommu_flush_write_buffer(domain->iommu);
-
-       /* free iova */
-       __free_iova(&domain->iovad, iova);
+       if (intel_iommu_strict) {
+               if (iommu_flush_iotlb_psi(domain->iommu,
+                       domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
+                       iommu_flush_write_buffer(domain->iommu);
+               /* free iova */
+               __free_iova(&domain->iovad, iova);
+       } else {
+               add_unmap(domain, iova);
+               /*
+                * queue up the release of the unmap; batching the IOTLB
+                * flushes saves roughly the 1/6th of CPU time that
+                * per-unmap flushing would otherwise consume...
+                */
+       }
 }
 
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
@@ -2263,6 +2403,7 @@ int __init intel_iommu_init(void)
        printk(KERN_INFO
        "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
 
+       init_timer(&unmap_timer);
        force_iommu = 1;
        dma_ops = &intel_dma_ops;
        return 0;