diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index d41ce380eedcbd298185a15db290cb15c946b00a..912a67494adff6928cc52f760d34b2da89149e6e 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -6,6 +6,10 @@
  */
 
 #include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_dbg.h"
+#include "ql4_inline.h"
+
 
 #include <scsi/scsi_tcq.h>
 
@@ -19,8 +23,8 @@
  *     - advances the request_in pointer
  *     - checks for queue full
  **/
-int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
-                       struct queue_entry **queue_entry)
+static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
+                              struct queue_entry **queue_entry)
 {
        uint16_t request_in;
        uint8_t status = QLA_SUCCESS;
@@ -63,9 +67,9 @@ int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
  * This routine issues a marker IOCB.
  **/
 int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
-                            struct ddb_entry *ddb_entry, int lun)
+       struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod)
 {
-       struct marker_entry *marker_entry;
+       struct qla4_marker_entry *marker_entry;
        unsigned long flags = 0;
        uint8_t status = QLA_SUCCESS;
 
@@ -83,7 +87,7 @@ int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
        marker_entry->hdr.entryType = ET_MARKER;
        marker_entry->hdr.entryCount = 1;
        marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
-       marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
+       marker_entry->modifier = cpu_to_le16(mrkr_mod);
        int_to_scsilun(lun, &marker_entry->lun);
        wmb();
 
@@ -96,7 +100,7 @@ exit_send_marker:
        return status;
 }
 
-struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
+static struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
        struct scsi_qla_host *ha)
 {
        struct continuation_t1_entry *cont_entry;
@@ -120,7 +124,7 @@ struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
        return cont_entry;
 }
 
-uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
+static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
 {
        uint16_t iocbs;
 
@@ -133,19 +137,21 @@ uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
        return iocbs;
 }
 
-void qla4xxx_build_scsi_iocbs(struct srb *srb,
-                             struct command_t3_entry *cmd_entry,
-                             uint16_t tot_dsds)
+static void qla4xxx_build_scsi_iocbs(struct srb *srb,
+                                    struct command_t3_entry *cmd_entry,
+                                    uint16_t tot_dsds)
 {
        struct scsi_qla_host *ha;
        uint16_t avail_dsds;
        struct data_seg_a64 *cur_dsd;
        struct scsi_cmnd *cmd;
+       struct scatterlist *sg;
+       int i;
 
        cmd = srb->cmd;
        ha = srb->ha;
 
-       if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+       if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                /* No data being transferred */
                cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
                return;
@@ -154,40 +160,27 @@ void qla4xxx_build_scsi_iocbs(struct srb *srb,
        avail_dsds = COMMAND_SEG;
        cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
 
-       /* Load data segments */
-       if (cmd->use_sg) {
-               struct scatterlist *cur_seg;
-               struct scatterlist *end_seg;
-
-               cur_seg = (struct scatterlist *)cmd->request_buffer;
-               end_seg = cur_seg + tot_dsds;
-               while (cur_seg < end_seg) {
-                       dma_addr_t sle_dma;
-
-                       /* Allocate additional continuation packets? */
-                       if (avail_dsds == 0) {
-                               struct continuation_t1_entry *cont_entry;
-
-                               cont_entry = qla4xxx_alloc_cont_entry(ha);
-                               cur_dsd =
-                                       (struct data_seg_a64 *)
-                                       &cont_entry->dataseg[0];
-                               avail_dsds = CONTINUE_SEG;
-                       }
-
-                       sle_dma = sg_dma_address(cur_seg);
-                       cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
-                       cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
-                       cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
-                       avail_dsds--;
-
-                       cur_dsd++;
-                       cur_seg++;
+       scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+               dma_addr_t sle_dma;
+
+               /* Allocate additional continuation packets? */
+               if (avail_dsds == 0) {
+                       struct continuation_t1_entry *cont_entry;
+
+                       cont_entry = qla4xxx_alloc_cont_entry(ha);
+                       cur_dsd =
+                               (struct data_seg_a64 *)
+                               &cont_entry->dataseg[0];
+                       avail_dsds = CONTINUE_SEG;
                }
-       } else {
-               cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
-               cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
-               cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
+
+               sle_dma = sg_dma_address(sg);
+               cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
+               cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
+               cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
+               avail_dsds--;
+
+               cur_dsd++;
        }
 }
 
@@ -204,8 +197,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
        struct scsi_cmnd *cmd = srb->cmd;
        struct ddb_entry *ddb_entry;
        struct command_t3_entry *cmd_entry;
-       struct scatterlist *sg = NULL;
 
+       int nseg;
        uint16_t tot_dsds;
        uint16_t req_cnt;
 
@@ -217,14 +210,6 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
        /* Get real lun and adapter */
        ddb_entry = srb->ddb;
 
-       /* Send marker(s) if needed. */
-       if (ha->marker_needed == 1) {
-               if (qla4xxx_send_marker_iocb(ha, ddb_entry,
-                                            cmd->device->lun) != QLA_SUCCESS)
-                       return QLA_ERROR;
-
-               ha->marker_needed = 0;
-       }
        tot_dsds = 0;
 
        /* Acquire hardware specific lock */
@@ -233,24 +218,11 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
        index = (uint32_t)cmd->request->tag;
 
        /* Calculate the number of request entries needed. */
-       if (cmd->use_sg) {
-               sg = (struct scatterlist *)cmd->request_buffer;
-               tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-                                     cmd->sc_data_direction);
-               if (tot_dsds == 0)
-                       goto queuing_error;
-       } else if (cmd->request_bufflen) {
-               dma_addr_t      req_dma;
-
-               req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-                                        cmd->request_bufflen,
-                                        cmd->sc_data_direction);
-               if (dma_mapping_error(req_dma))
-                       goto queuing_error;
-
-               srb->dma_handle = req_dma;
-               tot_dsds = 1;
-       }
+       nseg = scsi_dma_map(cmd);
+       if (nseg < 0)
+               goto queuing_error;
+       tot_dsds = nseg;
+
        req_cnt = qla4xxx_calc_request_entries(tot_dsds);
 
        if (ha->req_q_count < (req_cnt + 2)) {
@@ -279,7 +251,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 
        int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
        cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
-       cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
+       cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
        memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
        cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
        cmd_entry->hdr.entryCount = req_cnt;
@@ -289,13 +261,13 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
         *       transferred, as the data direction bit is sometimed filled
         *       in when there is no data to be transferred */
        cmd_entry->control_flags = CF_NO_DATA;
-       if (cmd->request_bufflen) {
+       if (scsi_bufflen(cmd)) {
                if (cmd->sc_data_direction == DMA_TO_DEVICE)
                        cmd_entry->control_flags = CF_WRITE;
                else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                        cmd_entry->control_flags = CF_READ;
 
-               ha->bytes_xfered += cmd->request_bufflen;
+               ha->bytes_xfered += scsi_bufflen(cmd);
                if (ha->bytes_xfered & ~0xFFFFF){
                        ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
                        ha->bytes_xfered &= 0xFFFFF;
@@ -359,14 +331,9 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
        return QLA_SUCCESS;
 
 queuing_error:
+       if (tot_dsds)
+               scsi_dma_unmap(cmd);
 
-       if (cmd->use_sg && tot_dsds) {
-               sg = (struct scatterlist *) cmd->request_buffer;
-               pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-                            cmd->sc_data_direction);
-       } else if (tot_dsds)
-               pci_unmap_single(ha->pdev, srb->dma_handle,
-                                cmd->request_bufflen, cmd->sc_data_direction);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        return QLA_ERROR;
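
The bulk of the hunks above replace the old use_sg/request_buffer/
request_bufflen handling with the generic SCSI data buffer accessors
(scsi_bufflen, scsi_for_each_sg, scsi_dma_map, scsi_dma_unmap). A minimal
sketch of that pattern follows; the function name example_map_command and
the build_dsd placeholder are illustrative only and not part of this driver.

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static int example_map_command(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        int i, nseg;

        /*
         * Map the command's scatter-gather list for DMA in one call;
         * this replaces the old pci_map_sg()/pci_map_single() split.
         */
        nseg = scsi_dma_map(cmd);
        if (nseg < 0)
                return -EIO;    /* mapping failed */

        /* nseg == 0 means there is no data phase; the loop does not run. */
        scsi_for_each_sg(cmd, sg, nseg, i) {
                dma_addr_t addr = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                /*
                 * build_dsd(addr, len) would go here: a hypothetical
                 * per-segment step standing in for the DSD filling done
                 * in qla4xxx_build_scsi_iocbs() above.
                 */
                (void)addr;
                (void)len;
        }

        return 0;
}

If queuing fails after the mapping succeeded, a single scsi_dma_unmap(cmd)
call releases it, which is what the reworked queuing_error path in
qla4xxx_send_command_to_isp() now does.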
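
qla4xxx_send_marker_iocb() also gains an explicit modifier argument in place
of the hard-coded MM_LUN_RESET, and the implicit marker send is dropped from
the normal I/O path. A hedged sketch of how an error-handling caller might
use the new signature (the surrounding reset logic is assumed, not shown in
this diff):

        /*
         * After a successful LUN reset, queue a marker so the firmware
         * flushes outstanding commands for this LUN before new I/O.
         */
        if (qla4xxx_send_marker_iocb(ha, ddb_entry, cmd->device->lun,
                                     MM_LUN_RESET) != QLA_SUCCESS)
                return FAILED;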