cache width is.
 
 int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr)
 
 In some circumstances dma_map_single and dma_map_page will fail to create
 a mapping. A driver can check for these errors by testing the returned
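
A minimal sketch of the corresponding map-then-check pattern under the new
two-argument convention ("dev", "buf" and "len" are placeholder names, not
taken from this patch):

	dma_addr_t dma_handle;

	dma_handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle)) {
		/*
		 * The mapping could not be created; dma_handle must not
		 * be handed to the hardware.  Reduce DMA usage, retry
		 * later, or fail the request.
		 */
		return -ENOMEM;
	}

	/* ... perform the DMA transfer ... */

	dma_unmap_single(dev, dma_handle, len, DMA_TO_DEVICE);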
 
        /*
         * Trying to unmap an invalid mapping
         */
-       if (dma_mapping_error(dma_addr)) {
+       if (dma_mapping_error(dev, dma_addr)) {
                dev_err(dev, "Trying to unmap invalid mapping\n");
                return;
        }
 
 }
 
 int
-hwsw_dma_mapping_error (dma_addr_t dma_addr)
+hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr);
+       return hwiommu_dma_mapping_error(dev, dma_addr) ||
+               swiotlb_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
 
 }
 
 int
-sba_dma_mapping_error (dma_addr_t dma_addr)
+sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
 
 }
 EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 
-int sn_dma_mapping_error(dma_addr_t dma_addr)
+int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
 
 
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
 
 
        dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dummy_page_da)) {
+       if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
                pr_err("PCIEX:Map dummy page failed.\n");
                kfree(dummy_page_va);
                return -1;
 
 
        dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dummy_page_da)) {
+       if (dma_mapping_error(phb->parent, dummy_page_da)) {
                pr_err("SPIDER-IOWA:Map dummy page filed.\n");
                kfree(dummy_page_va);
                return -1;
 
                count = 256 - off;
 
        dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dma_addr))
+       if (dma_mapping_error(NULL, dma_addr))
                return -ENOMEM;
        memset(page, 0, off + count);
        memset(&vsp_cmd, 0, sizeof(vsp_cmd));
 
        return ret;
 }
 
-static const struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_mapping_ops calgary_dma_ops = {
        .alloc_coherent = calgary_alloc_coherent,
        .map_single = calgary_map_single,
        .unmap_single = calgary_unmap_single,
 
 
 static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
 
 int dma_supported(struct device *dev, u64 mask)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
 #endif
 
-       if (dma_ops->dma_supported)
-               return dma_ops->dma_supported(dev, mask);
+       if (ops->dma_supported)
+               return ops->dma_supported(dev, mask);
 
        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory = NULL;
        struct page *page;
        unsigned long dma_mask = 0;
                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);
 
-                       if (dma_ops->alloc_coherent)
-                               return dma_ops->alloc_coherent(dev, size,
+                       if (ops->alloc_coherent)
+                               return ops->alloc_coherent(dev, size,
                                                           dma_handle, gfp);
                        return NULL;
                }
                }
        }
 
-       if (dma_ops->alloc_coherent) {
+       if (ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
-               return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+               return ops->alloc_coherent(dev, size, dma_handle, gfp);
        }
 
-       if (dma_ops->map_simple) {
-               *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+       if (ops->map_simple) {
+               *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
                                              size,
                                              PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
 void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t bus)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
        int order = get_order(size);
        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_coherent(dev, order, vaddr))
                return;
-       if (dma_ops->unmap_single)
-               dma_ops->unmap_single(dev, bus, size, 0);
+       if (ops->unmap_single)
+               ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
 
 extern int agp_amd64_init(void);
 
-static const struct dma_mapping_ops gart_dma_ops = {
-       .mapping_error                  = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
        .map_single                     = gart_map_single,
        .map_simple                     = gart_map_simple,
        .unmap_single                   = gart_unmap_single,
 
        return nents;
 }
 
-/* Make sure we keep the same behaviour */
-static int nommu_mapping_error(dma_addr_t dma_addr)
-{
-#ifdef CONFIG_X86_32
-       return 0;
-#else
-       return (dma_addr == bad_dma_address);
-#endif
-}
-
-
-const struct dma_mapping_ops nommu_dma_ops = {
+struct dma_mapping_ops nommu_dma_ops = {
        .map_single = nommu_map_single,
        .map_sg = nommu_map_sg,
-       .mapping_error = nommu_mapping_error,
        .is_phys = 1,
 };
 
 
        return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
-const struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_mapping_ops swiotlb_dma_ops = {
        .mapping_error = swiotlb_dma_mapping_error,
        .alloc_coherent = swiotlb_alloc_coherent,
        .free_coherent = swiotlb_free_coherent,
 
 
                address = dma_map_page(card->device, buffer->pages[i],
                                       0, PAGE_SIZE, direction);
-               if (dma_mapping_error(address)) {
+               if (dma_mapping_error(card->device, address)) {
                        __free_page(buffer->pages[i]);
                        goto out_pages;
                }
 
                payload_bus =
                        dma_map_single(ohci->card.device, packet->payload,
                                       packet->payload_length, DMA_TO_DEVICE);
-               if (dma_mapping_error(payload_bus)) {
+               if (dma_mapping_error(ohci->card.device, payload_bus)) {
                        packet->ack = RCODE_SEND_ERROR;
                        return -1;
                }
 
        orb->response_bus =
                dma_map_single(device->card->device, &orb->response,
                               sizeof(orb->response), DMA_FROM_DEVICE);
-       if (dma_mapping_error(orb->response_bus))
+       if (dma_mapping_error(device->card->device, orb->response_bus))
                goto fail_mapping_response;
 
        orb->request.response.high = 0;
        orb->base.request_bus =
                dma_map_single(device->card->device, &orb->request,
                               sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->base.request_bus))
+       if (dma_mapping_error(device->card->device, orb->base.request_bus))
                goto fail_mapping_request;
 
        sbp2_send_orb(&orb->base, lu, node_id, generation,
        orb->page_table_bus =
                dma_map_single(device->card->device, orb->page_table,
                               sizeof(orb->page_table), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->page_table_bus))
+       if (dma_mapping_error(device->card->device, orb->page_table_bus))
                goto fail_page_table;
 
        /*
        orb->base.request_bus =
                dma_map_single(device->card->device, &orb->request,
                               sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->base.request_bus))
+       if (dma_mapping_error(device->card->device, orb->base.request_bus))
                goto out;
 
        sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
 
 
        addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
                              tx->map_len, DMA_TO_DEVICE);
-       if (dma_mapping_error(addr)) {
+       if (dma_mapping_error(&dd->pcidev->dev, addr)) {
                ret = -EIO;
                goto unlock;
        }
 
 
        dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                DMA_TO_DEVICE);
-       if (dma_mapping_error(dma_addr)) {
+       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                ret = -ENOMEM;
                goto free_unmap;
        }
                                     pages[j], 0, flen, DMA_TO_DEVICE);
                unsigned long fofs = addr & ~PAGE_MASK;
 
-               if (dma_mapping_error(dma_addr)) {
+               if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                        ret = -ENOMEM;
                        goto done;
                }
                if (page) {
                        dma_addr = dma_map_page(&dd->pcidev->dev,
                                                page, 0, len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(dma_addr)) {
+                       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
 
                return -ENOMEM;
        dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+       if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
                __free_page(dev->eq_table.icm_page);
                return -ENOMEM;
        }
 
        pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
                        TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
 
-       return pci_dma_mapping_error(pluto->dma_addr);
+       return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
 }
 
 static void pluto_dma_unmap(struct pluto *pluto)
 
 
        host->align_addr = dma_map_single(mmc_dev(host->mmc),
                host->align_buffer, 128 * 4, direction);
-       if (dma_mapping_error(host->align_addr))
+       if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
                goto fail;
        BUG_ON(host->align_addr & 0x3);
 
 
        host->adma_addr = dma_map_single(mmc_dev(host->mmc),
                host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
-       if (dma_mapping_error(host->align_addr))
+       if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr))
                goto unmap_entries;
        BUG_ON(host->adma_addr & 0x3);
 
 
                        goto err;
 
                d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(d)) {
+               if (dma_mapping_error(NULL, d)) {
                        free_page((unsigned long)page);
                        goto err;
                }
                        goto err;
 
                d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-               if (dma_mapping_error(d)) {
+               if (dma_mapping_error(NULL, d)) {
                        free_page((unsigned long)page);
                        goto err;
                }
 
 
        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
-       if (unlikely(dma_mapping_error(mapping))) {
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }
 
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
-       if (unlikely(dma_mapping_error(mapping))) {
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }
 
        dma_addr_t mapping;
 
        mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
-       if (unlikely(pci_dma_mapping_error(mapping)))
+       if (unlikely(pci_dma_mapping_error(pdev, mapping)))
                return -ENOMEM;
 
        pci_unmap_addr_set(sd, dma_addr, mapping);
 
        rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
                RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
-       if (pci_dma_mapping_error(rx->dma_addr)) {
+       if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
                dev_kfree_skb_any(rx->skb);
                rx->skb = NULL;
                rx->dma_addr = 0;
 
                tx_ring->buffer_info[i].dma =
                        pci_map_single(pdev, skb->data, skb->len,
                                       PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
+               if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
                        ret_val = 4;
                        goto err_nomem;
                }
                rx_ring->buffer_info[i].dma =
                        pci_map_single(pdev, skb->data, 2048,
                                       PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
+               if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
                        ret_val = 8;
                        goto err_nomem;
                }
 
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_buffer_len,
                                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        break;
                                                   ps_page->page,
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(ps_page->dma)) {
+                               if (pci_dma_mapping_error(pdev, ps_page->dma)) {
                                        dev_err(&adapter->pdev->dev,
                                          "RX DMA page map failed\n");
                                        adapter->rx_dma_failed++;
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_ps_bsize0,
                                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        /* cleanup skb */
                                skb->data + offset,
                                size,
                                PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
                        dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
                        adapter->tx_dma_failed++;
                        return -1;
                                        offset,
                                        size,
                                        PCI_DMA_TODEVICE);
-                       if (pci_dma_mapping_error(buffer_info->dma)) {
+                       if (pci_dma_mapping_error(adapter->pdev,
+                                                 buffer_info->dma)) {
                                dev_err(&adapter->pdev->dev,
                                        "TX DMA page map failed\n");
                                adapter->tx_dma_failed++;
 
                dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                pool->buff_size, DMA_FROM_DEVICE);
 
-               if (dma_mapping_error(dma_addr))
+               if (dma_mapping_error((&adapter->vdev->dev, dma_addr))
                        goto failure;
 
                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
                pool->consumer_index = pool->size - 1;
        else
                pool->consumer_index--;
-       if (!dma_mapping_error(dma_addr))
+       if (!dma_mapping_error((&adapter->vdev->dev, dma_addr))
                dma_unmap_single(&adapter->vdev->dev,
                                 pool->dma_addr[index], pool->buff_size,
                                 DMA_FROM_DEVICE);
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
        int i;
+       struct device *dev = &adapter->vdev->dev;
 
        if(adapter->buffer_list_addr != NULL) {
-               if(!dma_mapping_error(adapter->buffer_list_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       adapter->buffer_list_dma, 4096,
+               if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+                       dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->buffer_list_dma = DMA_ERROR_CODE;
                }
        }
 
        if(adapter->filter_list_addr != NULL) {
-               if(!dma_mapping_error(adapter->filter_list_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       adapter->filter_list_dma, 4096,
+               if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+                       dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->filter_list_dma = DMA_ERROR_CODE;
                }
        }
 
        if(adapter->rx_queue.queue_addr != NULL) {
-               if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
+               if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+                       dma_unmap_single(dev,
                                        adapter->rx_queue.queue_dma,
                                        adapter->rx_queue.queue_len,
                                        DMA_BIDIRECTIONAL);
        int rc;
        union ibmveth_buf_desc rxq_desc;
        int i;
+       struct device *dev;
 
        ibmveth_debug_printk("open starting\n");
 
                return -ENOMEM;
        }
 
-       adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+       dev = &adapter->vdev->dev;
+
+       adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+       adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+       adapter->rx_queue.queue_dma = dma_map_single(dev,
                        adapter->rx_queue.queue_addr,
                        adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
-       if((dma_mapping_error(adapter->buffer_list_dma) ) ||
-          (dma_mapping_error(adapter->filter_list_dma)) ||
-          (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+       if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+           (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+           (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
                ibmveth_error_printk("unable to map filter or buffer list pages\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
        adapter->bounce_buffer_dma =
            dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
                           netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+       if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                ibmveth_error_printk("unable to map bounce buffer\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
                buf[1] = 0;
        }
 
-       if (dma_mapping_error(data_dma_addr)) {
+       if (dma_mapping_error((&adapter->vdev->dev, data_dma_addr)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        ibmveth_error_printk("tx: unable to map xmit buffer\n");
                skb_copy_from_linear_data(skb, adapter->bounce_buffer,
 
        msg->data.addr[0] = dma_map_single(port->dev, skb->data,
                                skb->len, DMA_TO_DEVICE);
 
-       if (dma_mapping_error(msg->data.addr[0]))
+       if (dma_mapping_error(port->dev, msg->data.addr[0]))
                goto recycle_and_drop;
 
        msg->dev = port->dev;
                dma_address = msg->data.addr[0];
                dma_length = msg->data.len[0];
 
-               if (!dma_mapping_error(dma_address))
+               if (!dma_mapping_error(msg->dev, dma_address))
                        dma_unmap_single(msg->dev, dma_address, dma_length,
                                        DMA_TO_DEVICE);
 
 
                return -ENOMEM;
        priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+       if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
                __free_page(priv->eq_table.icm_page);
                return -ENOMEM;
        }
 
                                     mac->bufsz - LOCAL_SKB_ALIGN,
                                     PCI_DMA_FROMDEVICE);
 
-               if (unlikely(dma_mapping_error(dma))) {
+               if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
                        dev_kfree_skb_irq(info->skb);
                        break;
                }
        map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                                PCI_DMA_TODEVICE);
        map_size[0] = skb_headlen(skb);
-       if (dma_mapping_error(map[0]))
+       if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
                goto out_err_nolock;
 
        for (i = 0; i < nfrags; i++) {
                                        frag->page_offset, frag->size,
                                        PCI_DMA_TODEVICE);
                map_size[i+1] = frag->size;
-               if (dma_mapping_error(map[i+1])) {
+               if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
                        nfrags = i;
                        goto out_err_nolock;
                }
 
                                             qdev->lrg_buffer_len -
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
-                       err = pci_dma_mapping_error(map);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
                        if(err) {
                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                       qdev->ndev->name, err);
                                                     QL_HEADER_SPACE,
                                                     PCI_DMA_FROMDEVICE);
 
-                               err = pci_dma_mapping_error(map);
+                               err = pci_dma_mapping_error(qdev->pdev, map);
                                if(err) {
                                        printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                               qdev->ndev->name, err);
         */
        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
-       err = pci_dma_mapping_error(map);
+       err = pci_dma_mapping_error(qdev->pdev, map);
        if(err) {
                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                       qdev->ndev->name, err);
                                                     sizeof(struct oal),
                                                     PCI_DMA_TODEVICE);
 
-                               err = pci_dma_mapping_error(map);
+                               err = pci_dma_mapping_error(qdev->pdev, map);
                                if(err) {
 
                                        printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
                                         frag->page_offset, frag->size,
                                         PCI_DMA_TODEVICE);
 
-                       err = pci_dma_mapping_error(map);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
                        if(err) {
                                printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
                                       qdev->ndev->name, err);
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
 
-                       err = pci_dma_mapping_error(map);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
                        if(err) {
                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                       qdev->ndev->name, err);
 
  *   Return Value:
  *  SUCCESS on success or an appropriate -ve value on failure.
  */
-
-static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+                               int from_card_up)
 {
        struct sk_buff *skb;
        struct RxD_t *rxdp;
                        rxdp1->Buffer0_ptr = pci_map_single
                            (ring->pdev, skb->data, size - NET_IP_ALIGN,
                                PCI_DMA_FROMDEVICE);
-                       if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+                       if (pci_dma_mapping_error(nic->pdev,
+                                               rxdp1->Buffer0_ptr))
                                goto pci_map_failed;
 
                        rxdp->Control_2 =
                                rxdp3->Buffer0_ptr =
                                   pci_map_single(ring->pdev, ba->ba_0,
                                        BUF0_LEN, PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
+                               if (pci_dma_mapping_error(nic->pdev,
+                                                       rxdp3->Buffer0_ptr))
                                        goto pci_map_failed;
                        } else
                                pci_dma_sync_single_for_device(ring->pdev,
                                (ring->pdev, skb->data, ring->mtu + 4,
                                                PCI_DMA_FROMDEVICE);
 
-                               if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+                               if (pci_dma_mapping_error(nic->pdev,
+                                                       rxdp3->Buffer2_ptr))
                                        goto pci_map_failed;
 
                                if (from_card_up) {
                                                ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
 
-                                       if (pci_dma_mapping_error
-                                               (rxdp3->Buffer1_ptr)) {
+                                       if (pci_dma_mapping_error(nic->pdev,
+                                               rxdp3->Buffer1_ptr)) {
                                                pci_unmap_single
                                                        (ring->pdev,
                                                    (dma_addr_t)(unsigned long)
        }
 }
 
-static int s2io_chk_rx_buffers(struct ring_info *ring)
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
 {
-       if (fill_rx_buffers(ring, 0) == -ENOMEM) {
+       if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
                DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
                DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
        }
                return 0;
 
        pkts_processed = rx_intr_handler(ring, budget);
-       s2io_chk_rx_buffers(ring);
+       s2io_chk_rx_buffers(nic, ring);
 
        if (pkts_processed < budget_org) {
                netif_rx_complete(dev, napi);
        for (i = 0; i < config->rx_ring_num; i++) {
                ring = &mac_control->rings[i];
                ring_pkts_processed = rx_intr_handler(ring, budget);
-               s2io_chk_rx_buffers(ring);
+               s2io_chk_rx_buffers(nic, ring);
                pkts_processed += ring_pkts_processed;
                budget -= ring_pkts_processed;
                if (budget <= 0)
                rx_intr_handler(&mac_control->rings[i], 0);
 
        for (i = 0; i < config->rx_ring_num; i++) {
-               if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
+               if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+                               -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
                        break;
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                        fifo->ufo_in_band_v,
                                        sizeof(u64), PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+               if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                        goto pci_map_failed;
                txdp++;
        }
 
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+       if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                goto pci_map_failed;
 
        txdp->Host_Control = (unsigned long) skb;
                netif_rx_schedule(dev, &ring->napi);
        } else {
                rx_intr_handler(ring, 0);
-               s2io_chk_rx_buffers(ring);
+               s2io_chk_rx_buffers(sp, ring);
        }
 
        return IRQ_HANDLED;
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
-                               s2io_chk_rx_buffers(&mac_control->rings[i]);
+                               s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
                }
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);
                                pci_map_single( sp->pdev, (*skb)->data,
                                        size - NET_IP_ALIGN,
                                        PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+                       if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
                                goto memalloc_failed;
                        rxdp->Host_Control = (unsigned long) (*skb);
                }
                                pci_map_single(sp->pdev, (*skb)->data,
                                               dev->mtu + 4,
                                               PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+                       if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
                                goto memalloc_failed;
                        rxdp3->Buffer0_ptr = *temp0 =
                                pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
                                                PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
+                       if (pci_dma_mapping_error(sp->pdev,
+                                               rxdp3->Buffer0_ptr)) {
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer2_ptr,
                                        dev->mtu + 4, PCI_DMA_FROMDEVICE);
                        rxdp3->Buffer1_ptr = *temp1 =
                                pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
+                       if (pci_dma_mapping_error(sp->pdev,
+                                               rxdp3->Buffer1_ptr)) {
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer0_ptr,
                                        BUF0_LEN, PCI_DMA_FROMDEVICE);
 
        for (i = 0; i < config->rx_ring_num; i++) {
                mac_control->rings[i].mtu = dev->mtu;
-               ret = fill_rx_buffers(&mac_control->rings[i], 1);
+               ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
                if (ret) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
 
                                          rx_buf->data, rx_buf->len,
                                          PCI_DMA_FROMDEVICE);
 
-       if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+       if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
                return -EIO;
                                        0, efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);
 
-               if (unlikely(pci_dma_mapping_error(dma_addr))) {
+               if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                        return -EIO;
 
 
        /* Process all fragments */
        while (1) {
-               if (unlikely(pci_dma_mapping_error(dma_addr)))
+               if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
                        goto pci_err;
 
                /* Store fields for marking in the per-fragment final
        tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
                                        TSOH_BUFFER(tsoh), header_len,
                                        PCI_DMA_TODEVICE);
-       if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+       if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+                                          tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }
 
        st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
                                          len, PCI_DMA_TODEVICE);
-       if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+       if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
                st->ifc.unmap_len = len;
                st->ifc.len = len;
                st->ifc.dma_addr = st->ifc.unmap_addr;
 
        /* iommu-map the skb */
        buf = pci_map_single(card->pdev, descr->skb->data,
                        SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(buf)) {
+       if (pci_dma_mapping_error(card->pdev, buf)) {
                dev_kfree_skb_any(descr->skb);
                descr->skb = NULL;
                if (netif_msg_rx_err(card) && net_ratelimit())
        unsigned long flags;
 
        buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(buf)) {
+       if (pci_dma_mapping_error(card->pdev, buf)) {
                if (netif_msg_tx_err(card) && net_ratelimit())
                        dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
                                  "Dropping packet\n", skb->data, skb->len);
 
                return NULL;
        *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(*dma_handle)) {
+       if (pci_dma_mapping_error(hwdev, *dma_handle)) {
                free_page((unsigned long)buf);
                return NULL;
        }
                return NULL;
        *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
                                     PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(*dma_handle)) {
+       if (pci_dma_mapping_error(hwdev, *dma_handle)) {
                dev_kfree_skb_any(skb);
                return NULL;
        }
 
                bf->skb = skb;
                bf->skbaddr = pci_map_single(sc->pdev,
                        skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
-               if (unlikely(pci_dma_mapping_error(bf->skbaddr))) {
+               if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
                        ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
                        dev_kfree_skb(skb);
                        bf->skb = NULL;
        ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
                        "skbaddr %llx\n", skb, skb->data, skb->len,
                        (unsigned long long)bf->skbaddr);
-       if (pci_dma_mapping_error(bf->skbaddr)) {
+       if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
                ATH5K_ERR(sc, "beacon DMA mapping failed\n");
                return -EIO;
        }
 
        crq->msg_token = dma_map_single(dev, crq->msgs,
                                        PAGE_SIZE, DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(crq->msg_token))
+       if (dma_mapping_error(dev, crq->msg_token))
                goto map_failed;
 
        retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
                                            async_q->size * sizeof(*async_q->msgs),
                                            DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(async_q->msg_token)) {
+       if (dma_mapping_error(dev, async_q->msg_token)) {
                dev_err(dev, "Failed to map async queue\n");
                goto free_async_crq;
        }
 
                                            sizeof(hostdata->madapter_info),
                                            DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(req->buffer)) {
+       if (dma_mapping_error(hostdata->dev, req->buffer)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        dev_err(hostdata->dev,
                                "Unable to map request_buffer for "
                                                    length,
                                                    DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(host_config->buffer)) {
+       if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        dev_err(hostdata->dev,
                                "dma_mapping error getting host config\n");
 
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(queue->msg_token))
+       if (dma_mapping_error(target->dev, queue->msg_token))
                goto map_failed;
 
        err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
 
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(queue->msg_token))
+       if (dma_mapping_error(hostdata->dev, queue->msg_token))
                goto map_failed;
 
        gather_partition_info();
 
                xfer->tx_dma = dma_map_single(dev,
                                (void *) xfer->tx_buf, xfer->len,
                                DMA_TO_DEVICE);
-               if (dma_mapping_error(xfer->tx_dma))
+               if (dma_mapping_error(dev, xfer->tx_dma))
                        return -ENOMEM;
        }
        if (xfer->rx_buf) {
                xfer->rx_dma = dma_map_single(dev,
                                xfer->rx_buf, xfer->len,
                                DMA_FROM_DEVICE);
-               if (dma_mapping_error(xfer->rx_dma)) {
+               if (dma_mapping_error(dev, xfer->rx_dma)) {
                        if (xfer->tx_buf)
                                dma_unmap_single(dev,
                                                xfer->tx_dma, xfer->len,
 
        hw->dma_rx_tmpbuf_size = size;
        hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
                        size, DMA_FROM_DEVICE);
-       if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) {
+       if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
                kfree(hw->dma_rx_tmpbuf);
                hw->dma_rx_tmpbuf = 0;
                hw->dma_rx_tmpbuf_size = 0;
                        dma_rx_addr = dma_map_single(hw->dev,
                                        (void *)t->rx_buf,
                                        t->len, DMA_FROM_DEVICE);
-                       if (dma_mapping_error(dma_rx_addr))
+                       if (dma_mapping_error(hw->dev, dma_rx_addr))
                                dev_err(hw->dev, "rx dma map error\n");
                }
        } else {
                        dma_tx_addr = dma_map_single(hw->dev,
                                        (void *)t->tx_buf,
                                        t->len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(dma_tx_addr))
+                       if (dma_mapping_error(hw->dev, dma_tx_addr))
                                dev_err(hw->dev, "tx dma map error\n");
                }
        } else {
 
                if (tx_buf != NULL) {
                        t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
                                        len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(t->tx_dma)) {
+                       if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                                dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                                                'T', len);
                                return -EINVAL;
                if (rx_buf != NULL) {
                        t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
                                        DMA_FROM_DEVICE);
-                       if (dma_mapping_error(t->rx_dma)) {
+                       if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                                dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                                                'R', len);
                                if (tx_buf != NULL)
 
        drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
                                                drv_data->rx_map_len,
                                                DMA_FROM_DEVICE);
-       if (dma_mapping_error(drv_data->rx_dma))
+       if (dma_mapping_error(dev, drv_data->rx_dma))
                return 0;
 
        /* Stream map the tx buffer */
                                                drv_data->tx_map_len,
                                                DMA_TO_DEVICE);
 
-       if (dma_mapping_error(drv_data->tx_dma)) {
+       if (dma_mapping_error(dev, drv_data->tx_dma)) {
                dma_unmap_single(dev, drv_data->rx_dma,
                                        drv_data->rx_map_len, DMA_FROM_DEVICE);
                return 0;
 
                                                        buf,
                                                        drv_data->tx_map_len,
                                                        DMA_TO_DEVICE);
-                       if (dma_mapping_error(drv_data->tx_dma))
+                       if (dma_mapping_error(dev, drv_data->tx_dma))
                                return -1;
 
                        drv_data->tx_dma_needs_unmap = 1;
                                        buf,
                                        drv_data->len,
                                        DMA_FROM_DEVICE);
-               if (dma_mapping_error(drv_data->rx_dma))
+               if (dma_mapping_error(dev, drv_data->rx_dma))
                        return -1;
                drv_data->rx_dma_needs_unmap = 1;
        }
                                        buf,
                                        drv_data->tx_map_len,
                                        DMA_TO_DEVICE);
-       if (dma_mapping_error(drv_data->tx_dma)) {
+       if (dma_mapping_error(dev, drv_data->tx_dma)) {
                if (drv_data->rx_dma) {
                        dma_unmap_single(dev,
                                        drv_data->rx_dma,
 
                pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
 #define dma_supported(dev, mask)                       \
                pci_dma_supported(alpha_gendev_to_pci(dev), mask)
-#define dma_mapping_error(addr)                                \
-               pci_dma_mapping_error(addr)
+#define dma_mapping_error(dev, addr)                           \
+               pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr)
 
 #else  /* no PCI - no IOMMU. */
 
 #define dma_unmap_page(dev, addr, size, dir)   ((void)0)
 #define dma_unmap_sg(dev, sg, nents, dir)      ((void)0)
 
-#define dma_mapping_error(addr)  (0)
+#define dma_mapping_error(dev, addr)  (0)
 
 #endif /* !CONFIG_PCI */
 
 
 /* Test for pci_map_single or pci_map_page having generated an error.  */
 
 static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 {
        return dma_addr == 0;
 }
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return dma_addr == ~0;
 }
 
 /*
  * dma_map_single can't fail as it is implemented now.
  */
-static inline int dma_mapping_error(dma_addr_t addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
 {
        return 0;
 }
 
 }
 
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
 
 }
 
 static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
 
 #define dma_sync_sg_for_device dma_sync_sg_for_cpu
 
 extern int
-dma_mapping_error(dma_addr_t dma_addr);
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 extern int
 dma_supported(struct device *dev, u64 mask);
 
 }
 
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return pci_dma_mapping_error(dma_addr);
+       return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
 }
 
 
 
 }
 
 static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 {
-       return dma_mapping_error(dma_addr);
+       return dma_mapping_error(&pdev->dev, dma_addr);
 }
 
 #endif
 
 typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
 typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
 typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
+typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);
 
 typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
 
 {
 }
 
-static inline int dma_mapping_error(dma_addr_t handle)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
 {
        return 0;
 }
 
        int nelems, enum dma_data_direction direction);
 extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        int nelems, enum dma_data_direction direction);
-extern int dma_mapping_error(dma_addr_t dma_addr);
+extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 extern int dma_supported(struct device *dev, u64 mask);
 
 static inline int
 
 }
 
 static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
 
 #endif
 
 /* At the moment, we panic on error for IOMMU resource exhaustion */
-#define dma_mapping_error(x)   0
+#define dma_mapping_error(dev, x)      0
 
 #endif
 
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 #ifdef CONFIG_PPC64
        return (dma_addr == DMA_ERROR_CODE);
 
        return L1_CACHE_BYTES;
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return dma_addr == 0;
 }
 
        /* No flushing needed to sync cpu writes to the device.  */
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return (dma_addr == DMA_ERROR_CODE);
 }
 
 
 #define PCI_DMA_ERROR_CODE      (~(dma_addr_t)0x0)
 
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+                                       dma_addr_t dma_addr)
 {
         return (dma_addr == PCI_DMA_ERROR_CODE);
 }
 
 #define PCI64_REQUIRED_MASK    (~(dma64_addr_t)0)
 #define PCI64_ADDR_BASE                0xfffc000000000000UL
 
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+                                       dma_addr_t dma_addr)
 {
-       return dma_mapping_error(dma_addr);
+       return dma_mapping_error(&pdev->dev, dma_addr);
 }
 
 #ifdef CONFIG_PCI
 
 #ifdef CONFIG_ACPI
        void    *acpi_handle;
 #endif
+#ifdef CONFIG_X86_64
+	struct dma_mapping_ops *dma_ops;
+#endif
 #ifdef CONFIG_DMAR
        void *iommu; /* hook for IOMMU specific extension */
 #endif
 
 extern int force_iommu;
 
 struct dma_mapping_ops {
-       int             (*mapping_error)(dma_addr_t dma_addr);
+       int             (*mapping_error)(struct device *dev,
+                                        dma_addr_t dma_addr);
        void*           (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
        int             is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-       if (dma_ops->mapping_error)
-               return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+       return dma_ops;
+#else
+       if (unlikely(!dev) || !dev->archdata.dma_ops)
+               return dma_ops;
+       else
+               return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+       return 0;
+#else
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+       if (ops->mapping_error)
+               return ops->mapping_error(dev, dma_addr);
 
        return (dma_addr == bad_dma_address);
+#endif
 }
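
As a hedged illustration of the per-device dispatch introduced here (the
names my_iommu_ops, my_iommu_map_single and my_iommu_mapping_error are
hypothetical, not part of this patch), an IOMMU backend on x86_64 could
route one device through its own table:

	static struct dma_mapping_ops my_iommu_ops = {
		.map_single	= my_iommu_map_single,		/* hypothetical */
		.mapping_error	= my_iommu_mapping_error,	/* hypothetical */
	};

	/* Per-device override; all other devices keep the global dma_ops. */
	dev->archdata.dma_ops = &my_iommu_ops;

After the assignment, get_dma_ops(dev) returns &my_iommu_ops, so
dma_map_single() and dma_mapping_error() on that device dispatch into the
backend, while devices without an override fall back to the global table
and, absent a ->mapping_error method, to the bad_dma_address comparison.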
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+       return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->unmap_single)
-               dma_ops->unmap_single(dev, addr, size, direction);
+       if (ops->unmap_single)
+               ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       return dma_ops->map_sg(hwdev, sg, nents, direction);
+       return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->unmap_sg)
-               dma_ops->unmap_sg(hwdev, sg, nents, direction);
+       if (ops->unmap_sg)
+               ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_single_for_cpu)
-               dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-                                            direction);
+       if (ops->sync_single_for_cpu)
+               ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
 }
 
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_single_for_device)
-               dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-                                               direction);
+       if (ops->sync_single_for_device)
+               ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
 }
 
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
 {
-       BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_single_range_for_cpu)
-               dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-                                                  size, direction);
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+       BUG_ON(!valid_dma_direction(direction));
+       if (ops->sync_single_range_for_cpu)
+               ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+                                              size, direction);
        flush_write_buffers();
 }
 
                                 unsigned long offset, size_t size,
                                 int direction)
 {
-       BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_single_range_for_device)
-               dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-                                                     offset, size, direction);
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+       BUG_ON(!valid_dma_direction(direction));
+       if (ops->sync_single_range_for_device)
+               ops->sync_single_range_for_device(hwdev, dma_handle,
+                                                 offset, size, direction);
        flush_write_buffers();
 }
 
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_sg_for_cpu)
-               dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+       if (ops->sync_sg_for_cpu)
+               ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
 }
 
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_sg_for_device)
-               dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+       if (ops->sync_sg_for_device)
+               ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
        flush_write_buffers();
 }
                                      size_t offset, size_t size,
                                      int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
        BUG_ON(!valid_dma_direction(direction));
-       return dma_ops->map_single(dev, page_to_phys(page)+offset,
-                                  size, direction);
+       return ops->map_single(dev, page_to_phys(page) + offset,
+                              size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 
                          int nents, int direction);
 extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
                             int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
                                  void *vaddr, dma_addr_t dma_handle);
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
 
                consistent_sync(sg_virt(sg), sg->length, dir);
 }
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
 
        }
 
        dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
-       if (!dma_mapping_error(dma_addr)) {
+       if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
                if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
                        *mptr++ = cpu_to_le32(0x7C020002);
 
 {
        switch (dev->bus->bustype) {
        case SSB_BUSTYPE_PCI:
-               return pci_dma_mapping_error(addr);
+               return pci_dma_mapping_error(dev->bus->host_pci, addr);
        case SSB_BUSTYPE_SSB:
-               return dma_mapping_error(addr);
+               return dma_mapping_error(dev->dev, addr);
        default:
                __ssb_dma_not_implemented(dev);
        }
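
A hedged usage sketch for an ssb driver ("buf" and "len" are placeholders,
and the ssb_dma_map_single() call is an assumption about the surrounding
ssb API rather than part of this patch):

	dma_addr_t addr;

	addr = ssb_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ssb_dma_mapping_error(dev, addr))
		goto drop;	/* bus-specific error encoding handled above */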
 
 {
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
-       return dma_mapping_error(dma_addr);
+       return dma_mapping_error(dev->dma_device, dma_addr);
 }
 
 /**
 
                 */
                dma_addr_t handle;
                handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-               if (swiotlb_dma_mapping_error(handle))
+               if (swiotlb_dma_mapping_error(hwdev, handle))
                        return NULL;
 
                ret = bus_to_virt(handle);
 }
 
 int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
        return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }
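
On a swiotlb-backed device the generic dma_mapping_error() now reaches the
function above through swiotlb_dma_ops.mapping_error.  A hedged sketch of a
direct caller ("hwdev", "ptr" and "size" are placeholders):

	dma_addr_t addr;

	addr = swiotlb_map_single(hwdev, ptr, size, DMA_TO_DEVICE);
	if (swiotlb_dma_mapping_error(hwdev, addr))
		return -ENOMEM;	/* addr landed in the overflow buffer */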
 
                                          (void *)
                                          vec->sge[xdr_sge_no].iov_base + sge_off,
                                          sge_bytes, DMA_TO_DEVICE);
-               if (dma_mapping_error(sge[sge_no].addr))
+               if (dma_mapping_error(xprt->sc_cm_id->device->dma_device,
+                                       sge[sge_no].addr))
                        goto err;
                sge_off = 0;
                sge_no++;