                                                unsigned long offset,
                                                enum dma_data_direction dir)
 {
-       dma_sync_single_range_for_device(sdev->dev, dma_base,
+       dma_sync_single_range_for_device(sdev->dma_dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
 }
                                             unsigned long offset,
                                             enum dma_data_direction dir)
 {
-       dma_sync_single_range_for_cpu(sdev->dev, dma_base,
+       dma_sync_single_range_for_cpu(sdev->dma_dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
 }
 
                BUG_ON(skb == NULL);
 
-               dma_unmap_single(bp->sdev->dev,
+               dma_unmap_single(bp->sdev->dma_dev,
                                 rp->mapping,
                                 skb->len,
                                 DMA_TO_DEVICE);
        if (skb == NULL)
                return -ENOMEM;
 
-       mapping = dma_map_single(bp->sdev->dev, skb->data,
+       mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);
 
                mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
-                       dma_unmap_single(bp->sdev->dev, mapping,
+                       dma_unmap_single(bp->sdev->dma_dev, mapping,
                                        RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
-               mapping = dma_map_single(bp->sdev->dev, skb->data,
+               mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         DMA_FROM_DEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
-                               dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+                               dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
                                             dest_idx * sizeof(dest_desc),
                                             DMA_BIDIRECTIONAL);
 
-       dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
+       dma_sync_single_for_device(bp->sdev->dma_dev, le32_to_cpu(src_desc->addr),
                                   RX_PKT_BUF_SZ,
                                   DMA_FROM_DEVICE);
 }
                struct rx_header *rh;
                u16 len;
 
-               dma_sync_single_for_cpu(bp->sdev->dev, map,
+               dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
                                            RX_PKT_BUF_SZ,
                                            DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
-                       dma_unmap_single(bp->sdev->dev, map,
+                       dma_unmap_single(bp->sdev->dma_dev, map,
                                         skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                goto err_out;
        }
 
-       mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
+       mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                struct sk_buff *bounce_skb;
 
                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
-                       dma_unmap_single(bp->sdev->dev, mapping, len,
+                       dma_unmap_single(bp->sdev->dma_dev, mapping, len,
                                        DMA_TO_DEVICE);
 
                bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;
 
-               mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
+               mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
                                         len, DMA_TO_DEVICE);
                if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
-                               dma_unmap_single(bp->sdev->dev, mapping,
+                               dma_unmap_single(bp->sdev->dma_dev, mapping,
                                         len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
 
                if (rp->skb == NULL)
                        continue;
-               dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
+               dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
                                        DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
 
                if (rp->skb == NULL)
                        continue;
-               dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
+               dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
                                        DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
        if (bp->flags & B44_FLAG_RX_RING_HACK)
-               dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
+               dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
                                          DMA_TABLE_BYTES,
                                          DMA_BIDIRECTIONAL);
 
        if (bp->flags & B44_FLAG_TX_RING_HACK)
-               dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
+               dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
                                          DMA_TABLE_BYTES,
                                          DMA_TO_DEVICE);
 
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
-                       dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
+                       dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
                                        DMA_TABLE_BYTES,
                                        DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
-                       dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+                       dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                            bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
-                       dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
+                       dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
                                        DMA_TABLE_BYTES,
                                        DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
-                       dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+                       dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
                                            bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
                goto out_err;
 
        size = DMA_TABLE_BYTES;
-       bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp);
+       bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to pci_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                if (!rx_ring)
                        goto out_err;
 
-               rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
+               rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
                                            DMA_TABLE_BYTES,
                                            DMA_BIDIRECTIONAL);
 
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }
 
-       bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp);
+       bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->tx_ring_dma, gfp);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to dma_alloc_coherent
                   insisting on use of GFP_DMA, which is more restrictive
                if (!tx_ring)
                        goto out_err;
 
-               tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
+               tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
                                            DMA_TABLE_BYTES,
                                            DMA_TO_DEVICE);
 
 
        dma_addr_t dmaaddr;
 
        if (tx) {
-               dmaaddr = dma_map_single(ring->dev->dev->dev,
+               dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                         buf, len, DMA_TO_DEVICE);
        } else {
-               dmaaddr = dma_map_single(ring->dev->dev->dev,
+               dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                         buf, len, DMA_FROM_DEVICE);
        }
 
                          dma_addr_t addr, size_t len, int tx)
 {
        if (tx) {
-               dma_unmap_single(ring->dev->dev->dev, addr, len, DMA_TO_DEVICE);
+               dma_unmap_single(ring->dev->dev->dma_dev,
+                                addr, len, DMA_TO_DEVICE);
        } else {
-               dma_unmap_single(ring->dev->dev->dev,
+               dma_unmap_single(ring->dev->dev->dma_dev,
                                 addr, len, DMA_FROM_DEVICE);
        }
 }
                                 dma_addr_t addr, size_t len)
 {
        B43_WARN_ON(ring->tx);
-       dma_sync_single_for_cpu(ring->dev->dev->dev,
+       dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
                                addr, len, DMA_FROM_DEVICE);
 }
 
                                    dma_addr_t addr, size_t len)
 {
        B43_WARN_ON(ring->tx);
-       dma_sync_single_for_device(ring->dev->dev->dev,
+       dma_sync_single_for_device(ring->dev->dev->dma_dev,
                                   addr, len, DMA_FROM_DEVICE);
 }
 
 
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-       struct device *dev = ring->dev->dev->dev;
+       struct device *dma_dev = ring->dev->dev->dma_dev;
        gfp_t flags = GFP_KERNEL;
 
        /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
         */
        if (ring->type == B43_DMA_64BIT)
                flags |= GFP_DMA;
-       ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
+       ring->descbase = dma_alloc_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
                                            &(ring->dmabase), flags);
        if (!ring->descbase) {
                b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-       struct device *dev = ring->dev->dev->dev;
+       struct device *dma_dev = ring->dev->dev->dma_dev;
 
-       dma_free_coherent(dev, B43_DMA_RINGMEMSIZE,
+       dma_free_coherent(dma_dev, B43_DMA_RINGMEMSIZE,
                          ring->descbase, ring->dmabase);
 }
 
                        goto err_kfree_meta;
 
                /* test for ability to dma to txhdr_cache */
-               dma_test = dma_map_single(dev->dev->dev,
+               dma_test = dma_map_single(dev->dev->dma_dev,
                                          ring->txhdr_cache,
                                          b43_txhdr_size(dev),
                                          DMA_TO_DEVICE);
                        if (!ring->txhdr_cache)
                                goto err_kfree_meta;
 
-                       dma_test = dma_map_single(dev->dev->dev,
+                       dma_test = dma_map_single(dev->dev->dma_dev,
                                                  ring->txhdr_cache,
                                                  b43_txhdr_size(dev),
                                                  DMA_TO_DEVICE);
                        }
                }
 
-               dma_unmap_single(dev->dev->dev,
+               dma_unmap_single(dev->dev->dma_dev,
                                 dma_test, b43_txhdr_size(dev),
                                 DMA_TO_DEVICE);
        }