diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 15f38d94b3a853b3a86f002cd11c8e0333a12ba8..13efd417034947dc12d812b8fd495cfc8802d210 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -642,7 +642,8 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
        spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 }
 
-static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
+static void build_smp_wc(struct ib_qp *qp,
+                        u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
                         struct ib_wc *wc)
 {
        memset(wc, 0, sizeof *wc);
@@ -652,7 +653,7 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
        wc->pkey_index = pkey_index;
        wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
        wc->src_qp = IB_QP0;
-       wc->qp_num = IB_QP0;
+       wc->qp = qp;
        wc->slid = slid;
        wc->sl = 0;
        wc->dlid_path_bits = 0;
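
The two hunks above follow an ib_wc API change: the work completion now
carries the QP pointer itself rather than a bare QP number. As a minimal
consumer-side sketch, assuming the struct ib_qp layout from
<rdma/ib_verbs.h>; the helper name is invented for illustration:

	#include <rdma/ib_verbs.h>

	/* Hypothetical helper: code that used to read wc->qp_num now
	 * dereferences the QP the completion points at. */
	static u32 wc_qp_num(const struct ib_wc *wc)
	{
		return wc->qp->qp_num;
	}
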
@@ -713,7 +714,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                goto out;
        }
 
-       build_smp_wc(send_wr->wr_id, be16_to_cpu(smp->dr_slid),
+       build_smp_wc(mad_agent_priv->agent.qp,
+                    send_wr->wr_id, be16_to_cpu(smp->dr_slid),
                     send_wr->wr.ud.pkey_index,
                     send_wr->wr.ud.port_num, &mad_wc);
 
@@ -998,17 +1000,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
        mad_agent = mad_send_wr->send_buf.mad_agent;
        sge = mad_send_wr->sg_list;
-       sge[0].addr = dma_map_single(mad_agent->device->dma_device,
-                                    mad_send_wr->send_buf.mad,
-                                    sge[0].length,
-                                    DMA_TO_DEVICE);
-       pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
-
-       sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-                                    ib_get_payload(mad_send_wr),
-                                    sge[1].length,
-                                    DMA_TO_DEVICE);
-       pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+       sge[0].addr = ib_dma_map_single(mad_agent->device,
+                                       mad_send_wr->send_buf.mad,
+                                       sge[0].length,
+                                       DMA_TO_DEVICE);
+       mad_send_wr->header_mapping = sge[0].addr;
+
+       sge[1].addr = ib_dma_map_single(mad_agent->device,
+                                       ib_get_payload(mad_send_wr),
+                                       sge[1].length,
+                                       DMA_TO_DEVICE);
+       mad_send_wr->payload_mapping = sge[1].addr;
 
        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,12 +1028,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
        if (ret) {
-               dma_unmap_single(mad_agent->device->dma_device,
-                                pci_unmap_addr(mad_send_wr, header_mapping),
-                                sge[0].length, DMA_TO_DEVICE);
-               dma_unmap_single(mad_agent->device->dma_device,
-                                pci_unmap_addr(mad_send_wr, payload_mapping),
-                                sge[1].length, DMA_TO_DEVICE);
+               ib_dma_unmap_single(mad_agent->device,
+                                   mad_send_wr->header_mapping,
+                                   sge[0].length, DMA_TO_DEVICE);
+               ib_dma_unmap_single(mad_agent->device,
+                                   mad_send_wr->payload_mapping,
+                                   sge[1].length, DMA_TO_DEVICE);
        }
        return ret;
 }
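
The ib_send_mad() hunks above swap raw dma_map_single() calls on
device->dma_device for the ib_dma_* wrappers, and drop the pci_unmap_addr
bookkeeping in favor of plain fields. A rough sketch of the wrapper's
shape, paraphrased (not quoted verbatim) from the include/rdma/ib_verbs.h
of this era and renamed here to avoid clashing with the real inline: it
dispatches to per-device software DMA ops when a driver supplies them,
and falls back to the generic DMA API otherwise.

	#include <rdma/ib_verbs.h>

	/* Paraphrased sketch of the wrapper this patch switches to;
	 * renamed so it does not collide with the header's inline. */
	static inline u64 ib_dma_map_single_sketch(struct ib_device *dev,
						   void *cpu_addr, size_t size,
						   enum dma_data_direction direction)
	{
		if (dev->dma_ops)
			return dev->dma_ops->map_single(dev, cpu_addr,
							size, direction);
		return dma_map_single(dev->dma_device, cpu_addr,
				      size, direction);
	}
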
@@ -1850,11 +1852,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
        mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
                                    mad_list);
        recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
-       dma_unmap_single(port_priv->device->dma_device,
-                        pci_unmap_addr(&recv->header, mapping),
-                        sizeof(struct ib_mad_private) -
-                        sizeof(struct ib_mad_private_header),
-                        DMA_FROM_DEVICE);
+       ib_dma_unmap_single(port_priv->device,
+                           recv->header.mapping,
+                           sizeof(struct ib_mad_private) -
+                             sizeof(struct ib_mad_private_header),
+                           DMA_FROM_DEVICE);
 
        /* Setup MAD receive work completion from "normal" work completion */
        recv->header.wc = *wc;
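
A detail worth keeping in mind for the receive-side unmaps: only the
wire-visible portion of the buffer (GRH plus MAD) is ever mapped, so the
length is the private structure's size minus its header. A hypothetical
helper, named here only for illustration, makes the arithmetic explicit:

	#include "mad_priv.h"

	/* Hypothetical helper (not in the patch): the private header is
	 * CPU-only bookkeeping, so DMA covers everything after it. */
	static size_t mad_recv_dma_len(struct ib_mad_private *mad_priv)
	{
		return sizeof *mad_priv - sizeof mad_priv->header;
	}
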
@@ -2080,12 +2082,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
        qp_info = send_queue->qp_info;
 
 retry:
-       dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                        pci_unmap_addr(mad_send_wr, header_mapping),
-                        mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
-       dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                        pci_unmap_addr(mad_send_wr, payload_mapping),
-                        mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+       ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                           mad_send_wr->header_mapping,
+                           mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+       ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                           mad_send_wr->payload_mapping,
+                           mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
        queued_send_wr = NULL;
        spin_lock_irqsave(&send_queue->lock, flags);
        list_del(&mad_list->list);
@@ -2355,7 +2357,8 @@ static void local_completions(struct work_struct *work)
                         * Defined behavior is to complete response
                         * before request
                         */
-                       build_smp_wc((unsigned long) local->mad_send_wr,
+                       build_smp_wc(recv_mad_agent->agent.qp,
+                                    (unsigned long) local->mad_send_wr,
                                     be16_to_cpu(IB_LID_PERMISSIVE),
                                     0, recv_mad_agent->agent.port_num, &wc);
 
@@ -2528,13 +2531,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                break;
                        }
                }
-               sg_list.addr = dma_map_single(qp_info->port_priv->
-                                               device->dma_device,
-                                             &mad_priv->grh,
-                                             sizeof *mad_priv -
-                                               sizeof mad_priv->header,
-                                             DMA_FROM_DEVICE);
-               pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+               sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+                                                &mad_priv->grh,
+                                                sizeof *mad_priv -
+                                                  sizeof mad_priv->header,
+                                                DMA_FROM_DEVICE);
+               mad_priv->header.mapping = sg_list.addr;
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;
 
@@ -2549,12 +2551,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
-                       dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                        pci_unmap_addr(&mad_priv->header,
-                                                       mapping),
-                                        sizeof *mad_priv -
-                                          sizeof mad_priv->header,
-                                        DMA_FROM_DEVICE);
+                       ib_dma_unmap_single(qp_info->port_priv->device,
+                                           mad_priv->header.mapping,
+                                           sizeof *mad_priv -
+                                             sizeof mad_priv->header,
+                                           DMA_FROM_DEVICE);
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
                        break;
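
The two receive-posting hunks above store the bus address directly in
mad_priv->header.mapping, which presumes a companion change in
drivers/infiniband/core/mad_priv.h replacing the DECLARE_PCI_UNMAP_ADDR()
field with an unconditional u64. A sketch of the resulting layout, under
that assumption:

	/* Assumed companion change in mad_priv.h, paraphrased. */
	struct ib_mad_private_header {
		struct ib_mad_list_head mad_list;
		struct ib_mad_recv_wc recv_wc;
		struct ib_wc wc;
		u64 mapping;	/* was DECLARE_PCI_UNMAP_ADDR(mapping) */
	} __attribute__ ((packed));
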
@@ -2586,11 +2587,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);
 
-               dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                pci_unmap_addr(&recv->header, mapping),
-                                sizeof(struct ib_mad_private) -
-                                sizeof(struct ib_mad_private_header),
-                                DMA_FROM_DEVICE);
+               ib_dma_unmap_single(qp_info->port_priv->device,
+                                   recv->header.mapping,
+                                   sizeof(struct ib_mad_private) -
+                                     sizeof(struct ib_mad_private_header),
+                                   DMA_FROM_DEVICE);
                kmem_cache_free(ib_mad_cache, recv);
        }
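
One closing note on the conversion: none of these call sites check the
address returned by ib_dma_map_single(), but the same wrapper family also
provides ib_dma_mapping_error() for code that wants to. Its shape,
paraphrased to match the map sketch above (the single-argument
dma_mapping_error() is the era-appropriate fallback):

	/* Paraphrased sketch; renamed to avoid clashing with the header. */
	static inline int ib_dma_mapping_error_sketch(struct ib_device *dev,
						      u64 dma_addr)
	{
		if (dev->dma_ops)
			return dev->dma_ops->mapping_error(dev, dma_addr);
		return dma_mapping_error(dma_addr);
	}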