#include "intel-iommu.h"
#include <asm/proto.h> /* force_iommu is in this header on x86-64 */
#include <asm/cacheflush.h>
-#include <asm/iommu.h>
+#include <asm/gart.h>
#include "pci.h"
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
static int dmar_disabled;
static int __initdata dmar_map_gfx = 1;
+static int dmar_forcedac;
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
dmar_map_gfx = 0;
printk(KERN_INFO
"Intel-IOMMU: disable GFX device mapping\n");
+ } else if (!strncmp(str, "forcedac", 8)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: Forcing DAC for PCI devices\n");
+ dmar_forcedac = 1;
}
str += strcspn(str, ",");
non_present_entry_flush);
}
-static int iommu_get_alignment(u64 base, unsigned int size)
-{
- int t = 0;
- u64 end;
-
- end = base + size - 1;
- while (base != end) {
- t++;
- base >>= 1;
- end >>= 1;
- }
- return t;
-}
-
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int pages, int non_present_entry_flush)
{
- unsigned int align;
+ unsigned int mask;
BUG_ON(addr & (~PAGE_MASK_4K));
BUG_ON(pages == 0);
* PSI requires the region size to be 2^x pages, with the base address
* naturally aligned to that size
*/
- align = iommu_get_alignment(addr >> PAGE_SHIFT_4K, pages);
+ mask = ilog2(__roundup_pow_of_two(pages));
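+ /*
+  * e.g. flushing 5 pages rounds up to 8 == 1 << 3, so mask == 3 and
+  * the naturally aligned 8-page region containing addr is invalidated.
+  */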
/* Fallback to domain selective flush if size is too big */
- if (align > cap_max_amask_val(iommu->cap))
+ if (mask > cap_max_amask_val(iommu->cap))
return iommu_flush_iotlb_dsi(iommu, did,
non_present_entry_flush);
- addr >>= PAGE_SHIFT_4K + align;
- addr <<= PAGE_SHIFT_4K + align;
-
- return __iommu_flush_iotlb(iommu, did, addr, align,
+ return __iommu_flush_iotlb(iommu, did, addr, mask,
DMA_TLB_PSI_FLUSH, non_present_entry_flush);
}
return 0;
}
+/* IOMMU interrupt handling. Most of it is MSI-like. */
+
+static char *fault_reason_strings[] =
+{
+ "Software",
+ "Present bit in root entry is clear",
+ "Present bit in context entry is clear",
+ "Invalid context entry",
+ "Access beyond MGAW",
+ "PTE Write access is not set",
+ "PTE Read access is not set",
+ "Next page table ptr is invalid",
+ "Root table address invalid",
+ "Context table ptr is invalid",
+ "non-zero reserved fields in RTP",
+ "non-zero reserved fields in CTP",
+ "non-zero reserved fields in PTE",
+ "Unknown"
+};
+#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)
+
+char *dmar_get_fault_reason(u8 fault_reason)
+{
+ if (fault_reason >= MAX_FAULT_REASON_IDX)
+ return fault_reason_strings[MAX_FAULT_REASON_IDX];
+ else
+ return fault_reason_strings[fault_reason];
+}
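+/*
+ * e.g. dmar_get_fault_reason(2) yields "Present bit in context entry
+ * is clear"; any reason code past the table maps to "Unknown".
+ */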
+
+void dmar_msi_unmask(unsigned int irq)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ /* unmask it */
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(0, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_mask(unsigned int irq)
+{
+ unsigned long flag;
+ struct intel_iommu *iommu = get_irq_data(irq);
+
+ /* mask it */
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
+ /* Read a reg to force flush the post write */
+ readl(iommu->reg + DMAR_FECTL_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_write(int irq, struct msi_msg *msg)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
+ writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
+ writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
+
+void dmar_msi_read(int irq, struct msi_msg *msg)
+{
+ struct intel_iommu *iommu = get_irq_data(irq);
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
+ msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
+ msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+}
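+/*
+ * The fault-event MSI address is split across FEADDR (low 32 bits)
+ * and FEUADDR (upper 32 bits); dmar_msi_read() simply mirrors what
+ * dmar_msi_write() programmed into those registers.
+ */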
+
+static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type,
+ u8 fault_reason, u16 source_id, u64 addr)
+{
+ char *reason;
+
+ reason = dmar_get_fault_reason(fault_reason);
+
+ printk(KERN_ERR
+ "DMAR:[%s] Request device [%02x:%02x.%d] "
+ "fault addr %llx \n"
+ "DMAR:[fault reason %02d] %s\n",
+ (type ? "DMA Read" : "DMA Write"),
+ (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+ PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
+ return 0;
+}
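+/*
+ * With illustrative values, the message above reads roughly:
+ *   DMAR:[DMA Read] Request device [02:00.0] fault addr fe001000
+ *   DMAR:[fault reason 06] PTE Read access is not set
+ */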
+
+#define PRIMARY_FAULT_REG_LEN (16)
+static irqreturn_t iommu_page_fault(int irq, void *dev_id)
+{
+ struct intel_iommu *iommu = dev_id;
+ int reg, fault_index;
+ u32 fault_status;
+ unsigned long flag;
+
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+
+ /* TBD: ignore advanced fault log currently */
+ if (!(fault_status & DMA_FSTS_PPF))
+ goto clear_overflow;
+
+ fault_index = dma_fsts_fault_record_index(fault_status);
+ reg = cap_fault_reg_offset(iommu->cap);
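+ /*
+  * Each fault recording register is 16 bytes, decoded below as:
+  * bytes 0-7 faulting page address, bytes 8-11 source id, and
+  * bytes 12-15 the fault reason, request type and F bit.
+  */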
+ while (1) {
+ u8 fault_reason;
+ u16 source_id;
+ u64 guest_addr;
+ int type;
+ u32 data;
+
+ /* highest 32 bits */
+ data = readl(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 12);
+ if (!(data & DMA_FRCD_F))
+ break;
+
+ fault_reason = dma_frcd_fault_reason(data);
+ type = dma_frcd_type(data);
+
+ data = readl(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 8);
+ source_id = dma_frcd_source_id(data);
+
+ guest_addr = dmar_readq(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN);
+ guest_addr = dma_frcd_page_addr(guest_addr);
+ /* clear the fault */
+ writel(DMA_FRCD_F, iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 12);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+
+ iommu_page_fault_do_one(iommu, type, fault_reason,
+ source_id, guest_addr);
+
+ fault_index++;
+ if (fault_index >= cap_num_fault_regs(iommu->cap))
+ fault_index = 0;
+ spin_lock_irqsave(&iommu->register_lock, flag);
+ }
+clear_overflow:
+ /* clear primary fault overflow */
+ fault_status = readl(iommu->reg + DMAR_FSTS_REG);
+ if (fault_status & DMA_FSTS_PFO)
+ writel(DMA_FSTS_PFO, iommu->reg + DMAR_FSTS_REG);
+
+ spin_unlock_irqrestore(&iommu->register_lock, flag);
+ return IRQ_HANDLED;
+}
+
+int dmar_set_interrupt(struct intel_iommu *iommu)
+{
+ int irq, ret;
+
+ irq = create_irq();
+ if (irq <= 0) {
+ printk(KERN_ERR "IOMMU: no free vectors\n");
+ return -EINVAL;
+ }
+
+ set_irq_data(irq, iommu);
+ iommu->irq = irq;
+
+ ret = arch_setup_dmar_msi(irq);
+ if (ret) {
+ set_irq_data(irq, NULL);
+ iommu->irq = 0;
+ destroy_irq(irq);
+ return ret;
+ }
+
+ /* Clear any pending faults before requesting the irq */
+ iommu_page_fault(irq, iommu);
+
+ ret = request_irq(irq, iommu_page_fault, 0, iommu->name, iommu);
+ if (ret)
+ printk(KERN_ERR "IOMMU: can't request irq\n");
+ return ret;
+}
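+/*
+ * dmar_set_interrupt() is wired up from init_dmars() before the root
+ * entry is programmed, so faults from the very first translated DMA
+ * can already be reported.
+ */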
+
static int iommu_init_domains(struct intel_iommu *iommu)
{
unsigned long ndomains;
return iommu;
error_unmap:
iounmap(iommu->reg);
- iommu->reg = 0;
error:
kfree(iommu);
return NULL;
int i;
u64 addr, size;
- init_iova_domain(&reserved_iova_list);
+ init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
int adjust_width, agaw;
unsigned long sagaw;
- init_iova_domain(&domain->iovad);
+ init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
spin_lock_init(&domain->mapping_lock);
domain_reserve_special_ranges(domain);
list_del(&info->link);
list_del(&info->global);
if (info->dev)
- info->dev->sysdata = NULL;
+ info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
detach_domain_for_dev(info->domain, info->bus, info->devfn);
/*
* find_domain
- * Note: we use struct pci_dev->sysdata stores the info
+ * Note: we use struct pci_dev->dev.archdata.iommu stores the info
*/
struct dmar_domain *
find_domain(struct pci_dev *pdev)
struct device_domain_info *info;
/* No lock here, assumes no domain exit in normal case */
- info = pdev->sysdata;
+ info = pdev->dev.archdata.iommu;
if (info)
return info->domain;
return NULL;
}
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
- pdev->sysdata = info;
+ pdev->dev.archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
return domain;
error:
static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
struct pci_dev *pdev)
{
- if (pdev->sysdata == DUMMY_DEVICE_DOMAIN_INFO)
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return 0;
return iommu_prepare_identity_map(pdev, rmrr->base_address,
rmrr->end_address + 1);
}
+#ifdef CONFIG_DMAR_GFX_WA
+extern int arch_get_ram_range(int slot, u64 *addr, u64 *size);
+static void __init iommu_prepare_gfx_mapping(void)
+{
+ struct pci_dev *pdev = NULL;
+ u64 base, size;
+ int slot;
+ int ret;
+
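+ /*
+  * arch_get_ram_range() is treated as a cursor here (assumed
+  * contract): it fills base/size for one RAM range and returns the
+  * next slot, going negative once all ranges have been visited.
+  */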
+ for_each_pci_dev(pdev) {
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO ||
+ !IS_GFX_DEVICE(pdev))
+ continue;
+ printk(KERN_INFO "IOMMU: gfx device %s 1-1 mapping\n",
+ pci_name(pdev));
+ slot = arch_get_ram_range(0, &base, &size);
+ while (slot >= 0) {
+ ret = iommu_prepare_identity_map(pdev,
+ base, base + size);
+ if (ret)
+ goto error;
+ slot = arch_get_ram_range(slot, &base, &size);
+ }
+ continue;
+error:
+ printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
+ }
+}
+#endif
+
+#ifdef CONFIG_DMAR_FLOPPY_WA
+static inline void iommu_prepare_isa(void)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+ if (!pdev)
+ return;
+
+ printk(KERN_INFO "IOMMU: Prepare 0-16M unity mapping for LPC\n");
+ ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
+
+ if (ret)
+ printk("IOMMU: Failed to create 0-64M identity map, "
+ "floppy might not work\n");
+}
+#else
+static inline void iommu_prepare_isa(void)
+{
+ return;
+}
+#endif /* !CONFIG_DMAR_FLOPPY_WA */
+
int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
}
}
+ iommu_prepare_gfx_mapping();
+
+ iommu_prepare_isa();
+
/*
* for each drhd
* enable fault log
iommu_flush_write_buffer(iommu);
+ ret = dmar_set_interrupt(iommu);
+ if (ret)
+ goto error;
+
iommu_set_root_entry(iommu);
iommu_flush_context_global(iommu, 0);
}
struct iova *
-iommu_alloc_iova(struct dmar_domain *domain, void *host_addr, size_t size,
- u64 start, u64 end)
+iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end)
{
- u64 start_addr;
struct iova *piova;
/* Make sure it's in range */
- if ((start > DOMAIN_MAX_ADDR(domain->gaw)) || end < start)
- return NULL;
-
end = min_t(u64, DOMAIN_MAX_ADDR(domain->gaw), end);
- start_addr = PAGE_ALIGN_4K(start);
- size = aligned_size((u64)host_addr, size);
- if (!size || (start_addr + size > end))
+ if (!size || (IOVA_START_ADDR + size > end))
return NULL;
piova = alloc_iova(&domain->iovad,
- size >> PAGE_SHIFT_4K, IOVA_PFN(end));
-
+ size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1);
return piova;
}
-static dma_addr_t __intel_map_single(struct device *dev, void *addr,
- size_t size, int dir, u64 *flush_addr, unsigned int *flush_size)
+static struct iova *
+__intel_alloc_iova(struct device *dev, struct dmar_domain *domain,
+ size_t size)
{
- struct dmar_domain *domain;
struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
- int prot = 0;
struct iova *iova = NULL;
- u64 start_addr;
-
- addr = (void *)virt_to_phys(addr);
-
- domain = get_domain_for_dev(pdev,
- DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain) {
- printk(KERN_ERR
- "Allocating domain for %s failed", pci_name(pdev));
- return 0;
- }
- start_addr = IOVA_START_ADDR;
-
- if (pdev->dma_mask <= DMA_32BIT_MASK) {
- iova = iommu_alloc_iova(domain, addr, size, start_addr,
- pdev->dma_mask);
+ if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) {
+ iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
} else {
/*
* First try to allocate an io virtual address in
* DMA_32BIT_MASK and if that fails then try allocating
- * from higer range
+ * from higher range
*/
- iova = iommu_alloc_iova(domain, addr, size, start_addr,
- DMA_32BIT_MASK);
+ iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK);
if (!iova)
- iova = iommu_alloc_iova(domain, addr, size, start_addr,
- pdev->dma_mask);
+ iova = iommu_alloc_iova(domain, size, pdev->dma_mask);
}
if (!iova) {
printk(KERN_ERR"Allocating iova for %s failed", pci_name(pdev));
- return 0;
+ return NULL;
+ }
+
+ return iova;
+}
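+/*
+ * e.g. a device with a 64-bit dma_mask first competes for IOVA space
+ * below 4GB and only spills into the higher range when that fails;
+ * with forcedac set, the full dma_mask range is used directly.
+ */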
+
+static struct dmar_domain *
+get_valid_domain_for_dev(struct pci_dev *pdev)
+{
+ struct dmar_domain *domain;
+ int ret;
+
+ domain = get_domain_for_dev(pdev,
+ DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ if (!domain) {
+ printk(KERN_ERR
+ "Allocating domain for %s failed", pci_name(pdev));
+ return NULL;
}
/* make sure context mapping is ok */
if (unlikely(!domain_context_mapped(domain, pdev))) {
ret = domain_context_mapping(domain, pdev);
- if (ret)
- goto error;
+ if (ret) {
+ printk(KERN_ERR
+ "Domain context map for %s failed",
+ pci_name(pdev));
+ return NULL;
+ }
}
+ return domain;
+}
+
+static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
+ size_t size, int dir)
+{
+ struct pci_dev *pdev = to_pci_dev(hwdev);
+ int ret;
+ struct dmar_domain *domain;
+ unsigned long start_addr;
+ struct iova *iova;
+ int prot = 0;
+
+ BUG_ON(dir == DMA_NONE);
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ return virt_to_bus(addr);
+
+ domain = get_valid_domain_for_dev(pdev);
+ if (!domain)
+ return 0;
+
+ addr = (void *)virt_to_phys(addr);
+ size = aligned_size((u64)addr, size);
+
+ iova = __intel_alloc_iova(hwdev, domain, size);
+ if (!iova)
+ goto error;
+
+ start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+
/*
* Check if DMAR supports zero-length reads on write only
* mappings..
* might have two guest_addr mapping to the same host addr, but this
* is not a big problem
*/
- ret = domain_page_mapping(domain, iova->pfn_lo << PAGE_SHIFT_4K,
- ((u64)addr) & PAGE_MASK_4K,
- (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT_4K, prot);
+ ret = domain_page_mapping(domain, start_addr,
+ ((u64)addr) & PAGE_MASK_4K, size, prot);
if (ret)
goto error;
pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n",
pci_name(pdev), size, (u64)addr,
- (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT_4K,
- (u64)(iova->pfn_lo << PAGE_SHIFT_4K), dir);
+ size, (u64)start_addr, dir);
+
+ /* it's a non-present to present mapping */
+ ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
+ start_addr, size >> PAGE_SHIFT_4K, 1);
+ if (ret)
+ iommu_flush_write_buffer(domain->iommu);
+
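+ /*
+  * The returned bus address preserves the sub-page offset of the
+  * original buffer, e.g. a buffer ending in ...678 maps to an IOVA
+  * ending in ...678 inside the allocated page range.
+  */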
+ return (start_addr + ((u64)addr & (~PAGE_MASK_4K)));
- *flush_addr = iova->pfn_lo << PAGE_SHIFT_4K;
- *flush_size = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT_4K;
- return (iova->pfn_lo << PAGE_SHIFT_4K) + ((u64)addr & (~PAGE_MASK_4K));
error:
- __free_iova(&domain->iovad, iova);
+ if (iova)
+ __free_iova(&domain->iovad, iova);
printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
pci_name(pdev), size, (u64)addr, dir);
return 0;
}
-static dma_addr_t intel_map_single(struct device *hwdev, void *addr,
+static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
size_t size, int dir)
{
- struct pci_dev *pdev = to_pci_dev(hwdev);
- dma_addr_t ret;
- struct dmar_domain *domain;
- u64 flush_addr;
- unsigned int flush_size;
-
- BUG_ON(dir == DMA_NONE);
- if (pdev->sysdata == DUMMY_DEVICE_DOMAIN_INFO)
- return virt_to_bus(addr);
-
- ret = __intel_map_single(hwdev, addr, size,
- dir, &flush_addr, &flush_size);
- if (ret) {
- domain = find_domain(pdev);
- /* it's a non-present to present mapping */
- if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
- flush_addr, flush_size >> PAGE_SHIFT_4K, 1))
- iommu_flush_write_buffer(domain->iommu);
- }
- return ret;
-}
-
-static void __intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
- size_t size, int dir, u64 *flush_addr, unsigned int *flush_size)
-{
- struct dmar_domain *domain;
struct pci_dev *pdev = to_pci_dev(dev);
+ struct dmar_domain *domain;
+ unsigned long start_addr;
struct iova *iova;
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ return;
domain = find_domain(pdev);
BUG_ON(!domain);
iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
- if (!iova) {
- *flush_size = 0;
+ if (!iova)
return;
- }
- pr_debug("Device %s unmapping: %lx@%llx\n",
- pci_name(pdev),
- (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT_4K,
- (u64)(iova->pfn_lo << PAGE_SHIFT_4K));
-
- *flush_addr = iova->pfn_lo << PAGE_SHIFT_4K;
- *flush_size = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT_4K;
- /* clear the whole page, not just dev_addr - (dev_addr + size) */
- dma_pte_clear_range(domain, *flush_addr, *flush_addr + *flush_size);
- /* free page tables */
- dma_pte_free_pagetable(domain, *flush_addr, *flush_addr + *flush_size);
- /* free iova */
- __free_iova(&domain->iovad, iova);
-}
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
- size_t size, int dir)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct dmar_domain *domain;
- u64 flush_addr;
- unsigned int flush_size;
+ start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+ size = aligned_size((u64)dev_addr, size);
- if (pdev->sysdata == DUMMY_DEVICE_DOMAIN_INFO)
- return;
+ pr_debug("Device %s unmapping: %lx@%llx\n",
+ pci_name(pdev), size, (u64)start_addr);
- domain = find_domain(pdev);
- __intel_unmap_single(dev, dev_addr, size,
- dir, &flush_addr, &flush_size);
- if (flush_size == 0)
- return;
- if (iommu_flush_iotlb_psi(domain->iommu, domain->id, flush_addr,
- flush_size >> PAGE_SHIFT_4K, 0))
+ /* clear the whole page */
+ dma_pte_clear_range(domain, start_addr, start_addr + size);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+
+ if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
+ size >> PAGE_SHIFT_4K, 0))
iommu_flush_write_buffer(domain->iommu);
+
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
}
static void * intel_alloc_coherent(struct device *hwdev, size_t size,
free_pages((unsigned long)vaddr, order);
}
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sg,
+#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
+static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
int nelems, int dir)
{
int i;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
- u64 flush_addr;
- unsigned int flush_size;
+ unsigned long start_addr;
+ struct iova *iova;
+ size_t size = 0;
+ void *addr;
+ struct scatterlist *sg;
- if (pdev->sysdata == DUMMY_DEVICE_DOMAIN_INFO)
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return;
domain = find_domain(pdev);
- for (i = 0; i < nelems; i++, sg++)
- __intel_unmap_single(hwdev, sg->dma_address,
- sg->dma_length, dir, &flush_addr, &flush_size);
- if (iommu_flush_iotlb_dsi(domain->iommu, domain->id, 0))
+ iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
+ if (!iova)
+ return;
+ for_each_sg(sglist, sg, nelems, i) {
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ size += aligned_size((u64)addr, sg->length);
+ }
+
+ start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+
+ /* clear the whole page */
+ dma_pte_clear_range(domain, start_addr, start_addr + size);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_addr, start_addr + size);
+
+ if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
+ size >> PAGE_SHIFT_4K, 0))
iommu_flush_write_buffer(domain->iommu);
+
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
}
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
static int intel_nontranslate_map_sg(struct device *hddev,
- struct scatterlist *sg, int nelems, int dir)
+ struct scatterlist *sglist, int nelems, int dir)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nelems; i++) {
- struct scatterlist *s = &sg[i];
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(s));
- s->dma_length = s->length;
+ for_each_sg(sglist, sg, nelems, i) {
+ BUG_ON(!sg_page(sg));
+ sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+ sg->dma_length = sg->length;
}
return nelems;
}
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nelems, int dir)
+static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist,
+ int nelems, int dir)
{
void *addr;
int i;
- dma_addr_t dma_handle;
struct pci_dev *pdev = to_pci_dev(hwdev);
struct dmar_domain *domain;
- u64 flush_addr;
- unsigned int flush_size;
+ size_t size = 0;
+ int prot = 0;
+ size_t offset = 0;
+ struct iova *iova = NULL;
+ int ret;
+ struct scatterlist *sg;
+ unsigned long start_addr;
BUG_ON(dir == DMA_NONE);
- if (pdev->sysdata == DUMMY_DEVICE_DOMAIN_INFO)
- return intel_nontranslate_map_sg(hwdev, sg, nelems, dir);
+ if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
+ return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
+
+ domain = get_valid_domain_for_dev(pdev);
+ if (!domain)
+ return 0;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ addr = SG_ENT_VIRT_ADDRESS(sg);
+ addr = (void *)virt_to_phys(addr);
+ size += aligned_size((u64)addr, sg->length);
+ }
+
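+ /*
+  * One contiguous IOVA range covering the whole aligned size is
+  * allocated; each sg entry is then mapped at an increasing offset
+  * inside it, so a single PSI flush covers the entire list.
+  */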
+ iova = __intel_alloc_iova(hwdev, domain, size);
+ if (!iova) {
+ sglist->dma_length = 0;
+ return 0;
+ }
+
+ /*
+ * Check if DMAR supports zero-length reads on write only
+ * mappings..
+ */
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
+ !cap_zlr(domain->iommu->cap))
+ prot |= DMA_PTE_READ;
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ prot |= DMA_PTE_WRITE;
- for (i = 0; i < nelems; i++, sg++) {
+ start_addr = iova->pfn_lo << PAGE_SHIFT_4K;
+ offset = 0;
+ for_each_sg(sglist, sg, nelems, i) {
addr = SG_ENT_VIRT_ADDRESS(sg);
- dma_handle = __intel_map_single(hwdev, addr,
- sg->length, dir, &flush_addr, &flush_size);
- if (!dma_handle) {
- intel_unmap_sg(hwdev, sg - i, i, dir);
- sg[0].dma_length = 0;
+ addr = (void *)virt_to_phys(addr);
+ size = aligned_size((u64)addr, sg->length);
+ ret = domain_page_mapping(domain, start_addr + offset,
+ ((u64)addr) & PAGE_MASK_4K,
+ size, prot);
+ if (ret) {
+ /* clear the page */
+ dma_pte_clear_range(domain, start_addr,
+ start_addr + offset);
+ /* free page tables */
+ dma_pte_free_pagetable(domain, start_addr,
+ start_addr + offset);
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
return 0;
}
- sg->dma_address = dma_handle;
+ sg->dma_address = start_addr + offset +
+ ((u64)addr & (~PAGE_MASK_4K));
sg->dma_length = sg->length;
+ offset += size;
}
- domain = find_domain(pdev);
-
/* it's a non-present to present mapping */
- if (iommu_flush_iotlb_dsi(domain->iommu, domain->id, 1))
+ if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
+ start_addr, offset >> PAGE_SHIFT_4K, 1))
iommu_flush_write_buffer(domain->iommu);
return nelems;
}
for (i = 0; i < drhd->devices_cnt; i++) {
if (!drhd->devices[i])
continue;
- drhd->devices[i]->sysdata = DUMMY_DEVICE_DOMAIN_INFO;
+ drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
}
}
}
dma_ops = &intel_dma_ops;
return 0;
}
+