if (qc->flags & ATA_QCFLAG_SG) {
                if (qc->n_elem)
-                       dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+                       dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
                /* restore last sg */
                sg[qc->orig_n_elem - 1].length += qc->pad_len;
                if (pad_buf) {
                }
        } else {
                if (qc->n_elem)
-                       dma_unmap_single(ap->host_set->dev,
+                       dma_unmap_single(ap->dev,
                                sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
                                dir);
                /* restore sg */
                goto skip_map;
        }
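
The unmap path mirrors how the buffers were mapped: scatter/gather
requests (ATA_QCFLAG_SG) go through dma_unmap_sg(), single buffers
through dma_unmap_single(), and both now take the cached ap->dev
rather than chasing ap->host_set->dev. A minimal sketch of that
pairing, assuming era-appropriate headers; the helper name is
hypothetical:

	#include <linux/dma-mapping.h>
	#include <asm/scatterlist.h>	/* assumption: pre-sg-chaining header */

	/* Hypothetical helper: unmap with the same struct device that
	 * performed the mapping, choosing the API by request type. */
	static void my_qc_unmap(struct device *dev, struct scatterlist *sg,
				int n_elem, int is_sg,
				enum dma_data_direction dir)
	{
		if (is_sg)
			dma_unmap_sg(dev, sg, n_elem, dir);	/* S/G table */
		else
			dma_unmap_single(dev, sg_dma_address(sg),
					 sg_dma_len(sg), dir);	/* one buffer */
	}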
 
-       dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
+       dma_address = dma_map_single(ap->dev, qc->buf_virt,
                                     sg->length, dir);
        if (dma_mapping_error(dma_address)) {
                /* restore sg */
        }
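
For the non-S/G case the buffer is mapped with dma_map_single() and the
result must be checked before use; in kernels of this vintage
dma_mapping_error() took only the returned address (later kernels also
take the device as a first argument). A hedged sketch, with a
hypothetical helper:

	/* Map one kernel-virtual buffer; on failure the caller restores
	 * the scatterlist it pre-filled. my_map_one is hypothetical. */
	static int my_map_one(struct device *dev, void *buf, size_t len,
			      enum dma_data_direction dir, dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, dir);

		if (dma_mapping_error(addr))
			return -1;
		*out = addr;
		return 0;
	}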
 
        dir = qc->dma_dir;
-       n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
+       n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
        if (n_elem < 1) {
                /* restore last sg */
                lsg->length += qc->pad_len;
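
dma_map_sg() may coalesce adjacent entries, so the count it returns
(not pre_n_elem) is what later gets handed to dma_unmap_sg(); a return
of less than one means the mapping failed, and the pad-length
adjustment to the last scatterlist entry has to be undone, as the hunk
above does. A sketch of that contract, with a hypothetical wrapper:

	/* Returns the mapped entry count for later dma_unmap_sg(), or -1
	 * on failure (the caller undoes its sg padding). */
	static int my_map_sg(struct device *dev, struct scatterlist *sg,
			     int pre_n_elem, enum dma_data_direction dir)
	{
		int n_elem = dma_map_sg(dev, sg, pre_n_elem, dir);

		return (n_elem < 1) ? -1 : n_elem;
	}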
 
 int ata_port_start (struct ata_port *ap)
 {
-       struct device *dev = ap->host_set->dev;
+       struct device *dev = ap->dev;
        int rc;
 
        ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
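
ata_port_start() now pulls the DMA-capable device straight off the port
and uses it for the coherent PRD table allocation, which yields both a
CPU pointer (ap->prd) and a bus address (ap->prd_dma). A sketch of the
allocate-and-check step, assuming the libata ATA_PRD_TBL_SZ constant;
the helper name is hypothetical:

	/* Allocate the PRD table; both addresses are kept so the stop
	 * path can free the pair. Assumes <linux/errno.h> is available. */
	static int my_prd_alloc(struct ata_port *ap, struct device *dev)
	{
		ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ,
					     &ap->prd_dma, GFP_KERNEL);
		if (!ap->prd)
			return -ENOMEM;
		return 0;
	}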
 
 void ata_port_stop (struct ata_port *ap)
 {
-       struct device *dev = ap->host_set->dev;
+       struct device *dev = ap->dev;
 
        dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
        ata_pad_free(ap, dev);
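
ata_port_stop() is the symmetric teardown: the PRD table and the DMA
pad buffer are released through the same struct device they were
allocated from, which is why a per-port cached pointer is safe for the
port's whole lifetime. A hedged sketch of the start-side counterpart,
assuming ata_pad_alloc() as the pairing of the ata_pad_free() above;
my_port_start and the unwind ordering are my reconstruction:

	/* Hypothetical start path: everything taken from dev here is
	 * returned through the same dev in the stop path. */
	static int my_port_start(struct ata_port *ap, struct device *dev)
	{
		ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ,
					     &ap->prd_dma, GFP_KERNEL);
		if (!ap->prd)
			return -ENOMEM;

		if (ata_pad_alloc(ap, dev)) {	/* pad buffer for odd-length xfers */
			dma_free_coherent(dev, ATA_PRD_TBL_SZ,
					  ap->prd, ap->prd_dma);	/* unwind */
			return -ENOMEM;
		}
		return 0;
	}
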
        ap->host = host;
        ap->ctl = ATA_DEVCTL_OBS;
        ap->host_set = host_set;
+       ap->dev = ent->dev;
        ap->port_no = port_no;
        ap->hard_port_no =
                ent->legacy_mode ? ent->hard_port_no : port_no;
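
The last hunk is the point of the whole patch: ata_host_init() caches
the probe entry's struct device in the new ap->dev field, so every DMA
call above dereferences one pointer instead of going through
ap->host_set->dev. A sketch of the resulting layout (fields
abbreviated; neighbors and ordering are assumptions):

	struct ata_port {
		/* ... */
		struct ata_host_set	*host_set;	/* shared controller state */
		struct device		*dev;		/* cached from the probe entry */
		/* ... */
	};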