PCI: implement new suspend/resume callbacks
[linux-2.6-omap-h63xx.git] / drivers / scsi / scsi_lib.c
index b12fb310e3999af497c21778b62f40c46892e8fd..a82d2fe80fb544da5afb86d835e6fbafb3a74533 100644 (file)
@@ -301,7 +301,6 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
                page = sg_page(sg);
                off = sg->offset;
                len = sg->length;
-               data_len += len;
 
                while (len > 0 && data_len > 0) {
                        /*
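
This hunk drops a double-count: assuming data_len is initialised from the
caller's bufflen before this loop (as in mainline scsi_req_map_sg() of this
era), adding each scatterlist element's length on top of it inflates the
total and defeats the "data_len > 0" clamp in the loop condition. A minimal
sketch of the consume pattern the loop relies on, names taken from the hunk,
surrounding kernel context elided:

	unsigned int data_len = bufflen;	/* total, from the caller */

	for_each_sg(sgl, sg, nsegs, i) {
		len = sg->length;
		while (len > 0 && data_len > 0) {
			bytes = min(len, data_len);	/* clamp to remainder */
			/* ... map `bytes` of this page into the bio ... */
			len -= bytes;
			data_len -= bytes;
		}
	}
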
@@ -446,7 +445,7 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
        scsi_set_resid(cmd, 0);
        memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (cmd->cmd_len == 0)
-               cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
+               cmd->cmd_len = scsi_command_size(cmd->cmnd);
 }
 
 void scsi_device_unbusy(struct scsi_device *sdev)
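
The change above swaps COMMAND_SIZE(), which only classifies the opcode's
group bits, for scsi_command_size(), which also understands variable-length
CDBs (opcode 0x7f) whose true size is carried in byte 7 of the CDB itself.
A standalone sketch of the distinction, simplified from the kernel's helpers
rather than copied verbatim:

#include <stdio.h>

#define VARIABLE_LENGTH_CMD	0x7f

/* fixed CDB sizes per opcode group (top three bits of byte 0) */
static const unsigned char size_tbl[8] = { 6, 10, 10, 12, 16, 12, 10, 10 };

static unsigned command_size(unsigned char opcode)
{
	return size_tbl[(opcode >> 5) & 7];	/* group lookup only */
}

static unsigned scsi_cdb_size(const unsigned char *cdb)
{
	/* variable-length CDBs carry "additional length" in byte 7 */
	return cdb[0] == VARIABLE_LENGTH_CMD ? cdb[7] + 8u
					     : command_size(cdb[0]);
}

int main(void)
{
	unsigned char read16[16] = { 0x88 };	/* READ(16) */
	unsigned char varlen[32] = { 0 };

	varlen[0] = VARIABLE_LENGTH_CMD;
	varlen[7] = 24;				/* 24 + 8 = 32 bytes */

	printf("READ(16): %u\n", scsi_cdb_size(read16));	/* 16 */
	printf("varlen:   %u\n", scsi_cdb_size(varlen));	/* 32 */
	return 0;
}
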
@@ -537,6 +536,9 @@ static void scsi_run_queue(struct request_queue *q)
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
+
+               int flagset;
+
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
@@ -550,19 +552,20 @@ static void scsi_run_queue(struct request_queue *q)
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
-               spin_unlock_irqrestore(shost->host_lock, flags);
-
-
-               if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-                   !test_and_set_bit(QUEUE_FLAG_REENTER,
-                                     &sdev->request_queue->queue_flags)) {
-                       blk_run_queue(sdev->request_queue);
-                       clear_bit(QUEUE_FLAG_REENTER,
-                                 &sdev->request_queue->queue_flags);
-               } else
-                       blk_run_queue(sdev->request_queue);
+               spin_unlock(shost->host_lock);
+
+               spin_lock(sdev->request_queue->queue_lock);
+               flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
+                               !test_bit(QUEUE_FLAG_REENTER,
+                                       &sdev->request_queue->queue_flags);
+               if (flagset)
+                       queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
+               __blk_run_queue(sdev->request_queue);
+               if (flagset)
+                       queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
+               spin_unlock(sdev->request_queue->queue_lock);
 
-               spin_lock_irqsave(shost->host_lock, flags);
+               spin_lock(shost->host_lock);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
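
The rewrite above replaces open-coded atomic bit flips on queue_flags with
queue_flag_set()/queue_flag_clear(), which are meant to run under the queue's
queue_lock, and calls __blk_run_queue(), the variant that expects queue_lock
already held. The host lock is now dropped with plain spin_unlock():
interrupts stay disabled from the spin_lock_irqsave() taken earlier in
scsi_run_queue(), so queue_lock can be taken immediately, and the two locks
are never held at the same time. A userspace sketch of that hand-off pattern,
with pthread mutexes standing in for the spinlocks and all names invented for
illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t host_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool reenter_flag;		/* stands in for QUEUE_FLAG_REENTER */

static void run_queue_locked(void)	/* models __blk_run_queue() */
{
	printf("running queue, reenter=%d\n", reenter_flag);
}

static void service_starved_device(void)
{
	pthread_mutex_lock(&host_lock);
	/* ... pop one device off the starved list ... */
	pthread_mutex_unlock(&host_lock);	/* never hold both locks */

	pthread_mutex_lock(&queue_lock);
	bool flagset = !reenter_flag;		/* test under queue_lock */
	if (flagset)
		reenter_flag = true;		/* mark the re-entry */
	run_queue_locked();
	if (flagset)
		reenter_flag = false;
	pthread_mutex_unlock(&queue_lock);

	pthread_mutex_lock(&host_lock);
	/* ... re-check whether the device starved again ... */
	pthread_mutex_unlock(&host_lock);
}

int main(void)
{
	service_starved_device();
	return 0;
}
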
@@ -785,7 +788,7 @@ EXPORT_SYMBOL(scsi_release_buffers);
  * in req->data_len and req->next_rq->data_len. The upper-layer driver can
  * decide what to do with this information.
  */
-void scsi_end_bidi_request(struct scsi_cmnd *cmd)
+static void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 {
        struct request *req = cmd->request;
        unsigned int dlen = req->data_len;
@@ -840,7 +843,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        int this_count = scsi_bufflen(cmd);
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
-       int clear_errors = 1;
+       int error = 0;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;
@@ -854,7 +857,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
-                       clear_errors = 0;
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
@@ -866,6 +868,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                                memcpy(req->sense, cmd->sense_buffer,  len);
                                req->sense_len = len;
                        }
+                       if (!sense_deferred)
+                               error = -EIO;
                }
                if (scsi_bidi_cmnd(cmd)) {
                        /* will also release_buffers */
@@ -886,14 +890,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                                      "%d bytes done.\n",
                                      req->nr_sectors, good_bytes));
 
-       if (clear_errors)
-               req->errors = 0;
-
        /* A number of bytes were successfully read.  If there
         * are leftovers and there is some kind of error
         * (result != 0), retry the rest.
         */
-       if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL)
+       if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
                return;
 
        /* good_bytes = 0, or (inclusive) there were leftovers and
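
Taken together, the scsi_io_completion() hunks change the completion
convention: instead of zeroing req->errors on success via clear_errors, the
function now computes a block-layer error code and hands it to
scsi_end_request(), so a failed SG_IO request completes with -EIO rather than
silently reporting success. Deferred sense describes an earlier command and
so does not fail the current request. A sketch of the resulting convention,
assuming scsi_end_request() forwards the value to the block layer unchanged:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* 0 completes the request successfully; a negative errno fails it */
static int scsi_result_to_error(int result, bool sense_deferred)
{
	if (result == 0)
		return 0;		/* command succeeded */
	if (sense_deferred)
		return 0;		/* sense describes an earlier command */
	return -EIO;			/* real, current error */
}

int main(void)
{
	printf("ok:       %d\n", scsi_result_to_error(0, false));
	printf("deferred: %d\n", scsi_result_to_error(2, true));
	printf("error:    %d\n", scsi_result_to_error(2, false));
	return 0;
}
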
@@ -1015,10 +1016,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
        }
 
        req->buffer = NULL;
-       if (blk_pc_request(req))
-               sdb->length = req->data_len;
-       else
-               sdb->length = req->nr_sectors << 9;
 
        /* 
         * Next, walk the list, and fill in the addresses and sizes of
@@ -1027,6 +1024,10 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
+       if (blk_pc_request(req))
+               sdb->length = req->data_len;
+       else
+               sdb->length = req->nr_sectors << 9;
        return BLKPREP_OK;
 }
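
Moving the sdb->length assignment below blk_rq_map_sg() matters because
mapping can grow the request: with a dma_pad_mask or a drain buffer
configured, blk_rq_map_sg() pads the final segment and bumps req->data_len
to match (mainline behaviour around this release, stated here as the assumed
rationale). Reading req->data_len first would leave sdb->length short of
what the scatterlist actually covers:

	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	sdb->table.nents = count;
	/* only now is req->data_len final (padding/drain included) */
	sdb->length = blk_pc_request(req) ? req->data_len
					  : req->nr_sectors << 9;
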
 
@@ -1093,6 +1094,8 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
        cmd->tag = req->tag;
        cmd->request = req;
 
+       cmd->cmnd = req->cmd;
+
        return cmd;
 }
 
@@ -1130,8 +1133,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
                req->buffer = NULL;
        }
 
-       BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
-       memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
        cmd->cmd_len = req->cmd_len;
        if (!req->data_len)
                cmd->sc_data_direction = DMA_NONE;
@@ -1168,6 +1169,7 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
        if (unlikely(!cmd))
                return BLKPREP_DEFER;
 
+       memset(cmd->cmnd, 0, BLK_MAX_CDB);
        return scsi_init_io(cmd, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
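
The three hunks above belong to one change: the scsi_cmnd CDB is no longer a
fixed array that req->cmd gets memcpy()'d into; cmd->cmnd now points directly
at the request's CDB buffer, groundwork for commands larger than 16 bytes,
which is why the BUILD_BUG_ON size check disappears. Block-layer (pc)
requests arrive with req->cmd already filled in, so aliasing suffices, while
filesystem requests build their CDB later and scsi_setup_fs_cmnd() must zero
the shared buffer first. Roughly the assumed before/after shape, with the
fs_request condition invented for illustration:

	/* before: fixed-size copy, CDB capped at sizeof(cmd->cmnd) */
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));

	/* after: alias the request's buffer; pc requests come pre-filled,
	 * fs requests must start from a zeroed CDB */
	cmd->cmnd = req->cmd;
	if (fs_request)
		memset(cmd->cmnd, 0, BLK_MAX_CDB);
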
@@ -1569,6 +1571,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
                                         request_fn_proc *request_fn)
 {
        struct request_queue *q;
+       struct device *dev = shost->shost_gendev.parent;
 
        q = blk_init_queue(request_fn, NULL);
        if (!q)
@@ -1583,9 +1586,13 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
        blk_queue_max_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
+       dma_set_seg_boundary(dev, shost->dma_boundary);
 
+       blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
+
+       /* New queue, no concurrency on queue_flags */
        if (!shost->use_clustering)
-               clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+               queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
 
        /*
         * set a reasonable default alignment on word boundaries: the
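
The queue setup hunk does two related things. First, it mirrors the host's
dma_boundary into the underlying struct device via dma_set_seg_boundary()
and caps the queue's segment size at dma_get_max_seg_size(dev), so the
DMA-mapping layer and the block layer agree on segment limits. Second, it
switches to queue_flag_clear_unlocked(), which is safe here only because the
queue is brand new and nothing else can touch queue_flags yet, as the added
comment notes. A condensed sketch of the limit plumbing, names from the hunk:

	struct device *dev = shost->shost_gendev.parent;

	/* block layer and DMA API must agree on segment limits */
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);
	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
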
@@ -2159,10 +2166,15 @@ void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
 {
        unsigned long flags;
 
+#if 0
+       /* FIXME: currently this check eliminates all media change events
+        * for polled devices.  Need to update to discriminate between AN
+        * and polled events */
        if (!test_bit(evt->evt_type, sdev->supported_events)) {
                kfree(evt);
                return;
        }
+#endif
 
        spin_lock_irqsave(&sdev->list_lock, flags);
        list_add_tail(&evt->node, &sdev->event_list);
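
The #if 0 above disables event filtering outright rather than lose
media-change events for polled devices: sdev->supported_events reflects what
the device advertises for async notification (AN), so the test also discards
events generated by polling. The FIXME asks for a check that can tell the
two apart; one hypothetical shape of it, where the from_an field is invented
here for illustration and does not exist in the kernel of this era:

	/* hypothetical: filter only events that arrived via AN */
	if (evt->from_an &&
	    !test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
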