x64, x2apic/intr-remap: fix the need for sequential array allocation of iommus
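
The pointer-arithmetic indexing in the deferred-flush path (iommu_id =
dom->iommu - g_iommus) forced all struct intel_iommu instances to live in
one contiguous, sequentially allocated g_iommus[] array. This change drops
the global array: each IOMMU is allocated on its own via alloc_iommu(drhd),
and per-unit side tables are indexed through a seq_id stored in the
structure itself. A minimal standalone sketch of the idea (illustrative
only; struct unit and its fields are hypothetical, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct unit {
	int seq_id;			/* assigned at allocation time */
};

int main(void)
{
	struct unit *units[4];
	int i;

	/* Each unit is allocated independently; no contiguous array of
	 * struct unit is required, unlike the old g_iommus[] scheme. */
	for (i = 0; i < 4; i++) {
		units[i] = malloc(sizeof(*units[i]));
		if (!units[i])
			return 1;
		units[i]->seq_id = i;	/* stable small integer id */
	}

	/* Per-unit side tables (like deferred_flush[]) stay indexed by a
	 * small integer, now read from the unit itself rather than
	 * derived from its address. */
	for (i = 0; i < 4; i++)
		printf("unit %p -> seq_id %d\n", (void *)units[i],
		       units[i]->seq_id);

	for (i = 0; i < 4; i++)
		free(units[i]);
	return 0;
}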
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 1fd8bb7657024c2629858940fc050a35fc3acab7..4d59a6a1f4dd4a977724449f4b13276cf59b8f22 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -49,7 +49,7 @@
 
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 
-#define DMAR_OPERATION_TIMEOUT (HZ*60) /* 1m */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
 
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
 
@@ -58,8 +58,6 @@ static void flush_unmaps_timeout(unsigned long data);
 
 DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
 
-static struct intel_iommu *g_iommus;
-
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
        int next;
@@ -490,12 +488,12 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
 {\
-       unsigned long start_time = jiffies;\
+       cycles_t start_time = get_cycles();\
        while (1) {\
                sts = op (iommu->reg + offset);\
                if (cond)\
                        break;\
-               if (time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT))\
+               if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
                        panic("DMAR hardware is malfunctioning\n");\
                cpu_relax();\
        }\
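
The two hunks above retire the jiffies-based 60 second timeout in favor of
a TSC-cycle budget: tsc_khz * 1000 cycles pass per second, so
(cycles_t)tsc_khz * 10 * 1000 is roughly ten seconds, matching the new
comment. A plausible motivation (not stated in the hunks themselves) is
that these register waits may run with interrupts disabled, where jiffies
stop advancing and a jiffies deadline could never expire, while
get_cycles() reads the TSC directly. A sketch of the resulting polling
pattern, with hw_ready() as a hypothetical stand-in for the macro's cond
check:

static void wait_for_hw(struct intel_iommu *iommu)
{
	cycles_t start = get_cycles();

	/* Busy-wait on hardware status with a cycle-based deadline;
	 * the unsigned subtraction stays correct across TSC wrap. */
	while (!hw_ready(iommu)) {		/* hypothetical check */
		if (DMAR_OPERATION_TIMEOUT < (cycles_t)(get_cycles() - start))
			panic("DMAR hardware is malfunctioning\n");
		cpu_relax();			/* CPU hint while spinning */
	}
}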
@@ -990,6 +988,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
                return -ENOMEM;
        }
 
+       spin_lock_init(&iommu->lock);
+
        /*
         * if Caching mode is set, then invalid translations are tagged
         * with domainid 0. Hence we need to pre-allocate it.
@@ -998,62 +998,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
                set_bit(0, iommu->domain_ids);
        return 0;
 }
-static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
-                                       struct dmar_drhd_unit *drhd)
-{
-       int ret;
-       int map_size;
-       u32 ver;
 
-       iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
-       if (!iommu->reg) {
-               printk(KERN_ERR "IOMMU: can't map the region\n");
-               goto error;
-       }
-       iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-       iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-       /* the registers might be more than one page */
-       map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-               cap_max_fault_reg_offset(iommu->cap));
-       map_size = PAGE_ALIGN_4K(map_size);
-       if (map_size > PAGE_SIZE_4K) {
-               iounmap(iommu->reg);
-               iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-               if (!iommu->reg) {
-                       printk(KERN_ERR "IOMMU: can't map the region\n");
-                       goto error;
-               }
-       }
-
-       ver = readl(iommu->reg + DMAR_VER_REG);
-       pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
-               drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-               iommu->cap, iommu->ecap);
-       ret = iommu_init_domains(iommu);
-       if (ret)
-               goto error_unmap;
-       spin_lock_init(&iommu->lock);
-       spin_lock_init(&iommu->register_lock);
-
-       drhd->iommu = iommu;
-       return iommu;
-error_unmap:
-       iounmap(iommu->reg);
-error:
-       kfree(iommu);
-       return NULL;
-}
 
 static void domain_exit(struct dmar_domain *domain);
-static void free_iommu(struct intel_iommu *iommu)
+
+void free_dmar_iommu(struct intel_iommu *iommu)
 {
        struct dmar_domain *domain;
        int i;
 
-       if (!iommu)
-               return;
-
        i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
        for (; i < cap_ndoms(iommu->cap); ) {
                domain = iommu->domains[i];
@@ -1078,10 +1031,6 @@ static void free_iommu(struct intel_iommu *iommu)
 
        /* free context mapping */
        free_context_table(iommu);
-
-       if (iommu->reg)
-               iounmap(iommu->reg);
-       kfree(iommu);
 }
 
 static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
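
The hunk above removes alloc_iommu() from this file and trims free_iommu()
down to free_dmar_iommu(), which keeps only the domain and context-table
teardown; mapping the registers and freeing the structure itself now belong
to the shared DMAR code. Reconstructed from the new call site
(alloc_iommu(drhd)) and the seq_id use later in this patch, the relocated
allocator plausibly looks like the sketch below; treat the body as an
assumption, not the actual code:

/* Sketch: independent per-unit allocation with a monotonically
 * increasing sequence id, replacing the old "slot in g_iommus[]"
 * identity. */
struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
{
	static int iommu_allocated;	/* next seq_id to hand out */
	struct intel_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return NULL;

	iommu->seq_id = iommu_allocated++;
	/* ... ioremap the registers, read cap/ecap, init locks ... */

	drhd->iommu = iommu;
	return iommu;
}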
@@ -1426,37 +1375,6 @@ find_domain(struct pci_dev *pdev)
        return NULL;
 }
 
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
-     struct pci_dev *dev)
-{
-       int index;
-
-       while (dev) {
-               for (index = 0; index < cnt; index++)
-                       if (dev == devices[index])
-                               return 1;
-
-               /* Check our parent */
-               dev = dev->bus->self;
-       }
-
-       return 0;
-}
-
-static struct dmar_drhd_unit *
-dmar_find_matched_drhd_unit(struct pci_dev *dev)
-{
-       struct dmar_drhd_unit *drhd = NULL;
-
-       list_for_each_entry(drhd, &dmar_drhd_units, list) {
-               if (drhd->include_all || dmar_pci_device_match(drhd->devices,
-                                               drhd->devices_cnt, dev))
-                       return drhd;
-       }
-
-       return NULL;
-}
-
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 {
@@ -1637,12 +1555,43 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
 }
 
 #ifdef CONFIG_DMAR_GFX_WA
-extern int arch_get_ram_range(int slot, u64 *addr, u64 *size);
+struct iommu_prepare_data {
+       struct pci_dev *pdev;
+       int ret;
+};
+
+static int __init iommu_prepare_work_fn(unsigned long start_pfn,
+                                        unsigned long end_pfn, void *datax)
+{
+       struct iommu_prepare_data *data;
+
+       data = (struct iommu_prepare_data *)datax;
+
+       data->ret = iommu_prepare_identity_map(data->pdev,
+                               start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+       return data->ret;
+
+}
+
+static int __init iommu_prepare_with_active_regions(struct pci_dev *pdev)
+{
+       int nid;
+       struct iommu_prepare_data data;
+
+       data.pdev = pdev;
+       data.ret = 0;
+
+       for_each_online_node(nid) {
+               work_with_active_regions(nid, iommu_prepare_work_fn, &data);
+               if (data.ret)
+                       return data.ret;
+       }
+       return data.ret;
+}
+
 static void __init iommu_prepare_gfx_mapping(void)
 {
        struct pci_dev *pdev = NULL;
-       u64 base, size;
-       int slot;
        int ret;
 
        for_each_pci_dev(pdev) {
@@ -1651,17 +1600,9 @@ static void __init iommu_prepare_gfx_mapping(void)
                        continue;
                printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
                        pci_name(pdev));
-               slot = arch_get_ram_range(0, &base, &size);
-               while (slot >= 0) {
-                       ret = iommu_prepare_identity_map(pdev,
-                                       base, base + size);
-                       if (ret)
-                               goto error;
-                       slot = arch_get_ram_range(slot, &base, &size);
-               }
-               continue;
-error:
-               printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
+               ret = iommu_prepare_with_active_regions(pdev);
+               if (ret)
+                       printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
        }
 }
 #endif
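
The graphics work-around now walks RAM through work_with_active_regions()
instead of the x86-only arch_get_ram_range() cursor. For each online node
the callback runs once per contiguous PFN range, and, as
iommu_prepare_work_fn() relies on by returning data->ret, a non-zero return
is expected to stop the walk. A minimal callback sketch (illustrative;
my_range_fn is hypothetical):

/* Contract assumed above: invoked once per [start_pfn, end_pfn)
 * range on a node; a non-zero return aborts the walk. */
static int __init my_range_fn(unsigned long start_pfn,
			      unsigned long end_pfn, void *data)
{
	u64 start = (u64)start_pfn << PAGE_SHIFT;
	u64 end = (u64)end_pfn << PAGE_SHIFT;

	/* ... map or inspect the byte range [start, end) ... */
	return 0;	/* 0 keeps the walk going */
}

/* usage: work_with_active_regions(nid, my_range_fn, &data); */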
@@ -1706,8 +1647,6 @@ int __init init_dmars(void)
         * endfor
         */
        for_each_drhd_unit(drhd) {
-               if (drhd->ignored)
-                       continue;
                g_num_of_iommus++;
                /*
                 * lock not needed as this is only incremented in the single
@@ -1716,31 +1655,26 @@ int __init init_dmars(void)
                 */
        }
 
-       g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
-       if (!g_iommus) {
-               ret = -ENOMEM;
-               goto error;
-       }
-
        deferred_flush = kzalloc(g_num_of_iommus *
                sizeof(struct deferred_flush_tables), GFP_KERNEL);
        if (!deferred_flush) {
-               kfree(g_iommus);
                ret = -ENOMEM;
                goto error;
        }
 
-       i = 0;
        for_each_drhd_unit(drhd) {
                if (drhd->ignored)
                        continue;
-               iommu = alloc_iommu(&g_iommus[i], drhd);
-               i++;
+               iommu = alloc_iommu(drhd);
                if (!iommu) {
                        ret = -ENOMEM;
                        goto error;
                }
 
+               ret = iommu_init_domains(iommu);
+               if (ret)
+                       goto error;
+
                /*
                 * TBD:
                 * we could share the same root & context tables
@@ -1823,7 +1757,6 @@ error:
                iommu = drhd->iommu;
                free_iommu(iommu);
        }
-       kfree(g_iommus);
        return ret;
 }
 
@@ -1980,7 +1913,10 @@ static void flush_unmaps(void)
        /* just flush them all */
        for (i = 0; i < g_num_of_iommus; i++) {
                if (deferred_flush[i].next) {
-                       iommu_flush_iotlb_global(&g_iommus[i], 0);
+                       struct intel_iommu *iommu =
+                               deferred_flush[i].domain[0]->iommu;
+
+                       iommu_flush_iotlb_global(iommu, 0);
                        for (j = 0; j < deferred_flush[i].next; j++) {
                                __free_iova(&deferred_flush[i].domain[j]->iovad,
                                                deferred_flush[i].iova[j]);
@@ -2010,7 +1946,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
        if (list_size == HIGH_WATER_MARK)
                flush_unmaps();
 
-       iommu_id = dom->iommu - g_iommus;
+       iommu_id = dom->iommu->seq_id;
+
        next = deferred_flush[iommu_id].next;
        deferred_flush[iommu_id].domain[next] = dom;
        deferred_flush[iommu_id].iova[next] = iova;
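
With g_iommus[] gone, the last two hunks recover the IOMMU in the flush
path from the queue itself. This is safe because add_unmap() files every
entry under its own IOMMU's seq_id, so all entries in slot i share one
IOMMU, and flush_unmaps() only touches a slot whose .next count is
non-zero, which guarantees domain[0] is populated. Condensed view of the
invariant (illustrative rearrangement of the code above):

/* add_unmap(): the queue slot is keyed by the owning IOMMU ... */
iommu_id = dom->iommu->seq_id;
deferred_flush[iommu_id].domain[next] = dom;

/* ... so in flush_unmaps() every entry in slot i shares one IOMMU,
 * and domain[0] is valid whenever .next > 0. */
if (deferred_flush[i].next) {
	struct intel_iommu *iommu = deferred_flush[i].domain[0]->iommu;
	iommu_flush_iotlb_global(iommu, 0);
}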