};
 
 #ifdef CONFIG_PPC_ISERIES
-struct device *iSeries_vio_dev = &vio_bus_device.dev;
-EXPORT_SYMBOL(iSeries_vio_dev);
-
 static struct iommu_table veth_iommu_table;
-static struct iommu_table vio_iommu_table;
+struct iommu_table vio_iommu_table;
 
 static void __init iommu_vio_init(void)
 {
                printk("Virtual Bus VETH TCE table failed.\n");
        if (!iommu_init_table(&vio_iommu_table, -1))
                printk("Virtual Bus VIO TCE table failed.\n");
-       vio_bus_device.dev.archdata.dma_ops = &dma_iommu_ops;
-       vio_bus_device.dev.archdata.dma_data = &vio_iommu_table;
 }
 #else
 static void __init iommu_vio_init(void)
 
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/pci.h>
+#include <linux/module.h>
 
 #include <asm/iommu.h>
 #include <asm/tce.h>
 #include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/iseries/hv_call_xm.h>
+#include <asm/iseries/hv_call_event.h>
 #include <asm/iseries/iommu.h>
 
 static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
 }
 #endif
 
+extern struct iommu_table vio_iommu_table;
+
+/*
+ * Allocate DMA-coherent memory mapped through the VIO TCE table,
+ * constrained to a 32-bit DMA mask.  Replaces dma_alloc_coherent()
+ * on the removed fake iSeries_vio_dev device.
+ * NOTE(review): the trailing -1 presumably means "any NUMA node" —
+ * confirm against iommu_alloc_coherent()'s parameter list.
+ */
+void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
+{
+       return iommu_alloc_coherent(&vio_iommu_table, size, dma_handle,
+                               DMA_32BIT_MASK, flag, -1);
+}
+EXPORT_SYMBOL_GPL(iseries_hv_alloc);
+
+/*
+ * Free memory previously obtained from iseries_hv_alloc(), releasing
+ * both the virtual allocation and its VIO TCE table mapping.
+ */
+void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+       iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
+}
+EXPORT_SYMBOL_GPL(iseries_hv_free);
+
+/*
+ * Map an existing kernel buffer for streaming DMA through the VIO TCE
+ * table (32-bit DMA mask).  Replaces dma_map_single() on the removed
+ * iSeries_vio_dev device; pair with iseries_hv_unmap().
+ */
+dma_addr_t iseries_hv_map(void *vaddr, size_t size,
+                       enum dma_data_direction direction)
+{
+       return iommu_map_single(&vio_iommu_table, vaddr, size,
+                               DMA_32BIT_MASK, direction);
+}
+
+/*
+ * Tear down a streaming DMA mapping created by iseries_hv_map().
+ * Size and direction must match the original mapping call.
+ */
+void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
+                       enum dma_data_direction direction)
+{
+       iommu_unmap_single(&vio_iommu_table, dma_handle, size, direction);
+}
+
 void iommu_init_early_iSeries(void)
 {
        ppc_md.tce_build = tce_build_iSeries;
 
 #include <asm/paca.h>
 #include <asm/abs_addr.h>
 #include <asm/firmware.h>
-#include <asm/iseries/vio.h>
 #include <asm/iseries/mf.h>
 #include <asm/iseries/hv_lp_config.h>
+#include <asm/iseries/hv_lp_event.h>
 #include <asm/iseries/it_lp_queue.h>
 
 #include "setup.h"
        if ((off + count) > 256)
                count = 256 - off;
 
-       dma_addr = dma_map_single(iSeries_vio_dev, page, off + count,
-                       DMA_FROM_DEVICE);
+       dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
        if (dma_mapping_error(dma_addr))
                return -ENOMEM;
        memset(page, 0, off + count);
        vsp_cmd.sub_data.kern.length = off + count;
        mb();
        rc = signal_vsp_instruction(&vsp_cmd);
-       dma_unmap_single(iSeries_vio_dev, dma_addr, off + count,
-                       DMA_FROM_DEVICE);
+       iseries_hv_unmap(dma_addr, off + count, DMA_FROM_DEVICE);
        if (rc)
                return rc;
        if (vsp_cmd.result_code != 0)
        int len = *size;
        dma_addr_t dma_addr;
 
-       dma_addr = dma_map_single(iSeries_vio_dev, buffer, len,
-                       DMA_FROM_DEVICE);
+       dma_addr = iseries_hv_map(buffer, len, DMA_FROM_DEVICE);
        memset(buffer, 0, len);
        memset(&vsp_cmd, 0, sizeof(vsp_cmd));
        vsp_cmd.cmd = 32;
                        rc = -ENOMEM;
        }
 
-       dma_unmap_single(iSeries_vio_dev, dma_addr, len, DMA_FROM_DEVICE);
+       iseries_hv_unmap(dma_addr, len, DMA_FROM_DEVICE);
 
        return rc;
 }
                goto out;
 
        dma_addr = 0;
-       page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr,
-                       GFP_ATOMIC);
+       page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
        ret = -ENOMEM;
        if (page == NULL)
                goto out;
        ret = count;
 
 out_free:
-       dma_free_coherent(iSeries_vio_dev, count, page, dma_addr);
+       iseries_hv_free(count, page, dma_addr);
 out:
        return ret;
 }
                goto out;
 
        dma_addr = 0;
-       page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr,
-                       GFP_ATOMIC);
+       page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
        rc = -ENOMEM;
        if (page == NULL) {
                printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
        *ppos += count;
        rc = count;
 out_free:
-       dma_free_coherent(iSeries_vio_dev, count, page, dma_addr);
+       iseries_hv_free(count, page, dma_addr);
 out:
        return rc;
 }
 
        if (!buf)
                return 0;
 
-       handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
-                               DMA_FROM_DEVICE);
+       handle = iseries_hv_map(buf, HW_PAGE_SIZE, DMA_FROM_DEVICE);
 
        hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
                        HvLpEvent_Type_VirtualIo,
        buf[HW_PAGE_SIZE-1] = '\0';
        seq_printf(m, "%s", buf);
 
-       dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE,
-                        DMA_FROM_DEVICE);
+       iseries_hv_unmap(handle, HW_PAGE_SIZE, DMA_FROM_DEVICE);
        kfree(buf);
 
        seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
 
        struct cdrom_info *viocd_unitinfo;
        dma_addr_t unitinfo_dmaaddr;
 
-       viocd_unitinfo = dma_alloc_coherent(iSeries_vio_dev,
+       viocd_unitinfo = iseries_hv_alloc(
                        sizeof(*viocd_unitinfo) * VIOCD_MAX_CD,
                        &unitinfo_dmaaddr, GFP_ATOMIC);
        if (viocd_unitinfo == NULL) {
        }
 
 error_ret:
-       dma_free_coherent(iSeries_vio_dev,
-                       sizeof(*viocd_unitinfo) * VIOCD_MAX_CD,
+       iseries_hv_free(sizeof(*viocd_unitinfo) * VIOCD_MAX_CD,
                        viocd_unitinfo, unitinfo_dmaaddr);
 }
 
 
        if (op == NULL)
                return -ENOMEM;
 
-       viotape_unitinfo = dma_alloc_coherent(iSeries_vio_dev, len,
-               &viotape_unitinfo_token, GFP_ATOMIC);
+       viotape_unitinfo = iseries_hv_alloc(len, &viotape_unitinfo_token,
+               GFP_ATOMIC);
        if (viotape_unitinfo == NULL) {
                free_op_struct(op);
                return -ENOMEM;
        class_destroy(tape_class);
        unregister_chrdev(VIOTAPE_MAJOR, "viotape");
        if (viotape_unitinfo)
-               dma_free_coherent(iSeries_vio_dev,
-                               sizeof(viotape_unitinfo[0]) * VIOTAPE_MAX_TAPE,
+               iseries_hv_free(sizeof(viotape_unitinfo[0]) * VIOTAPE_MAX_TAPE,
                                viotape_unitinfo, viotape_unitinfo_token);
        viopath_close(viopath_hostLp, viomajorsubtype_tape, VIOTAPE_MAXREQ + 2);
        vio_clearHandler(viomajorsubtype_tape);
 
 #ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
 #define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
 
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+
 #include <asm/iseries/hv_call_sc.h>
 #include <asm/iseries/hv_types.h>
 #include <asm/abs_addr.h>
                        eventData3, eventData4, eventData5);
 }
 
+extern void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag);
+extern void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle);
+extern dma_addr_t iseries_hv_map(void *vaddr, size_t size,
+                       enum dma_data_direction direction);
+extern void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
+                       enum dma_data_direction direction);
+
 static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
 {
        return HvCall1(HvCallEventAckLpEvent, virt_to_abs(event));
 
        viochar_rc_ebusy = 1
 };
 
-struct device;
-
-extern struct device *iSeries_vio_dev;
-
 #endif /* _ASM_POWERPC_ISERIES_VIO_H */