1 /*
2 *******************************************************************************
3 **        O.S   : Linux
4 **   FILE NAME  : arcmsr_hba.c
5 **        BY    : Erich Chen
6 **   Description: SCSI RAID Device Driver for
7 **                ARECA RAID Host adapter
8 *******************************************************************************
9 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
10 **
11 **     Web site: www.areca.com.tw
12 **       E-mail: erich@areca.com.tw
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License version 2 as
16 ** published by the Free Software Foundation.
17 ** This program is distributed in the hope that it will be useful,
18 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
19 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20 ** GNU General Public License for more details.
21 *******************************************************************************
22 ** Redistribution and use in source and binary forms, with or without
23 ** modification, are permitted provided that the following conditions
24 ** are met:
25 ** 1. Redistributions of source code must retain the above copyright
26 **    notice, this list of conditions and the following disclaimer.
27 ** 2. Redistributions in binary form must reproduce the above copyright
28 **    notice, this list of conditions and the following disclaimer in the
29 **    documentation and/or other materials provided with the distribution.
30 ** 3. The name of the author may not be used to endorse or promote products
31 **    derived from this software without specific prior written permission.
32 **
33 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 *******************************************************************************
44 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
45 **     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
46 *******************************************************************************
47 */
48 #include <linux/module.h>
49 #include <linux/reboot.h>
50 #include <linux/spinlock.h>
51 #include <linux/pci_ids.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/errno.h>
55 #include <linux/types.h>
56 #include <linux/delay.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/timer.h>
59 #include <linux/pci.h>
60 #include <asm/dma.h>
61 #include <asm/io.h>
62 #include <asm/system.h>
63 #include <asm/uaccess.h>
64 #include <scsi/scsi_host.h>
65 #include <scsi/scsi.h>
66 #include <scsi/scsi_cmnd.h>
67 #include <scsi/scsi_tcq.h>
68 #include <scsi/scsi_device.h>
69 #include <scsi/scsi_transport.h>
70 #include <scsi/scsicam.h>
71 #include "arcmsr.h"
72
73 MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
74 MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter");
75 MODULE_LICENSE("Dual BSD/GPL");
76 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
77
78 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd);
79 static int arcmsr_abort(struct scsi_cmnd *);
80 static int arcmsr_bus_reset(struct scsi_cmnd *);
81 static int arcmsr_bios_param(struct scsi_device *sdev,
82                                 struct block_device *bdev, sector_t capacity, int *info);
83 static int arcmsr_queue_command(struct scsi_cmnd * cmd,
84                                 void (*done) (struct scsi_cmnd *));
85 static int arcmsr_probe(struct pci_dev *pdev,
86                                 const struct pci_device_id *id);
87 static void arcmsr_remove(struct pci_dev *pdev);
88 static void arcmsr_shutdown(struct pci_dev *pdev);
89 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
90 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
91 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
92 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
93 static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
94 static const char *arcmsr_info(struct Scsi_Host *);
95 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
96
97 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
98 {
99         if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
100                 queue_depth = ARCMSR_MAX_CMD_PERLUN;
101         scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
102         return queue_depth;
103 }
104
105 static struct scsi_host_template arcmsr_scsi_host_template = {
106         .module                 = THIS_MODULE,
107         .name                   = "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION,
108         .info                   = arcmsr_info,
109         .queuecommand           = arcmsr_queue_command,
110         .eh_abort_handler       = arcmsr_abort,
111         .eh_bus_reset_handler   = arcmsr_bus_reset,
112         .bios_param             = arcmsr_bios_param,
113         .change_queue_depth     = arcmsr_adjust_disk_queue_depth,
114         .can_queue              = ARCMSR_MAX_OUTSTANDING_CMD,
115         .this_id                = ARCMSR_SCSI_INITIATOR_ID,
116         .sg_tablesize           = ARCMSR_MAX_SG_ENTRIES,
117         .max_sectors            = ARCMSR_MAX_XFER_SECTORS,
118         .cmd_per_lun            = ARCMSR_MAX_CMD_PERLUN,
119         .use_clustering         = ENABLE_CLUSTERING,
120         .shost_attrs            = arcmsr_host_attrs,
121 };
122
123 static struct pci_device_id arcmsr_device_id_table[] = {
124         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
125         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
126         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
127         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
128         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
129         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
130         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
131         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
132         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
133         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
134         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
135         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
136         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
137         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
138         {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
139         {0, 0}, /* Terminating entry */
140 };
141 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
142 static struct pci_driver arcmsr_pci_driver = {
143         .name                   = "arcmsr",
144         .id_table               = arcmsr_device_id_table,
145         .probe                  = arcmsr_probe,
146         .remove                 = arcmsr_remove,
147         .shutdown               = arcmsr_shutdown
148 };
149
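/*
** interrupt entry point registered with request_irq(): grab the SCSI
** host lock and hand the work to arcmsr_interrupt()
*/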
150 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id,
151         struct pt_regs *regs)
152 {
153         irqreturn_t handle_state;
154         struct AdapterControlBlock *acb;
155         unsigned long flags;
156
157         acb = (struct AdapterControlBlock *)dev_id;
158
159         spin_lock_irqsave(acb->host->host_lock, flags);
160         handle_state = arcmsr_interrupt(acb);
161         spin_unlock_irqrestore(acb->host->host_lock, flags);
162         return handle_state;
163 }
164
165 static int arcmsr_bios_param(struct scsi_device *sdev,
166                 struct block_device *bdev, sector_t capacity, int *geom)
167 {
168         int ret, heads, sectors, cylinders, total_capacity;
169         unsigned char *buffer;/* return copy of block device's partition table */
170
171         buffer = scsi_bios_ptable(bdev);
172         if (buffer) {
173                 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
174                 kfree(buffer);
175                 if (ret != -1)
176                         return ret;
177         }
178         total_capacity = capacity;
179         heads = 64;
180         sectors = 32;
181         cylinders = total_capacity / (heads * sectors);
182         if (cylinders > 1024) {
183                 heads = 255;
184                 sectors = 63;
185                 cylinders = total_capacity / (heads * sectors);
186         }
187         geom[0] = heads;
188         geom[1] = sectors;
189         geom[2] = cylinders;
190         return 0;
191 }
192
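/*
** allocate one dma-coherent region holding ARCMSR_MAX_FREECCB_NUM command
** control blocks plus 0x20 bytes of slack, align it to a 32 byte boundary
** and thread the ccbs onto ccb_free_list. The firmware exchanges ccb
** addresses shifted right by 5 bits (the ccbs are 32 byte aligned); if the
** upper 32 bits of the pool address are non zero they are passed to the
** iop with the SET_CONFIG message
*/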
193 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
194 {
195         struct pci_dev *pdev = acb->pdev;
196         struct MessageUnit __iomem *reg = acb->pmu;
197         u32 ccb_phyaddr_hi32;
198         void *dma_coherent;
199         dma_addr_t dma_coherent_handle, dma_addr;
200         struct CommandControlBlock *ccb_tmp;
201         int i, j;
202
203         dma_coherent = dma_alloc_coherent(&pdev->dev,
204                         ARCMSR_MAX_FREECCB_NUM *
205                         sizeof (struct CommandControlBlock) + 0x20,
206                         &dma_coherent_handle, GFP_KERNEL);
207         if (!dma_coherent)
208                 return -ENOMEM;
209
210         acb->dma_coherent = dma_coherent;
211         acb->dma_coherent_handle = dma_coherent_handle;
212
213         if (((unsigned long)dma_coherent & 0x1F)) {
214                 dma_coherent = dma_coherent +
215                         (0x20 - ((unsigned long)dma_coherent & 0x1F));
216                 dma_coherent_handle = dma_coherent_handle +
217                         (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
218         }
219
220         dma_addr = dma_coherent_handle;
221         ccb_tmp = (struct CommandControlBlock *)dma_coherent;
222         for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
223                 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
224                 ccb_tmp->acb = acb;
225                 acb->pccb_pool[i] = ccb_tmp;
226                 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
227                 dma_addr = dma_addr + sizeof (struct CommandControlBlock);
228                 ccb_tmp++;
229         }
230
231         acb->vir2phy_offset = (unsigned long)ccb_tmp -
232                               (unsigned long)dma_addr;
233         for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
234                 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
235                         acb->devstate[i][j] = ARECA_RAID_GOOD;
236
237         /*
238         ** here we need to tell iop 331 our ccb_tmp.HighPart
239         ** if ccb_tmp.HighPart is not zero
240         */
241         ccb_phyaddr_hi32 = (uint32_t) ((dma_coherent_handle >> 16) >> 16);
242         if (ccb_phyaddr_hi32 != 0) {
243                 writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->message_rwbuffer[0]);
244                 writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
245                 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
246                 if (arcmsr_wait_msgint_ready(acb))
247                         printk(KERN_NOTICE "arcmsr%d: "
248                                "'set ccb high part physical address' timeout\n",
249                                 acb->host->host_no);
250         }
251
252         writel(readl(&reg->outbound_intmask) |
253                         ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
254                &reg->outbound_intmask);
255         return 0;
256 }
257
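/*
** pci probe: enable the device, allocate the Scsi_Host, set the dma mask
** (64 bit with a 32 bit fallback), map BAR 0, allocate the ccb pool, hook
** the interrupt, initialize the iop and register with the scsi midlayer
*/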
258 static int arcmsr_probe(struct pci_dev *pdev,
259         const struct pci_device_id *id)
260 {
261         struct Scsi_Host *host;
262         struct AdapterControlBlock *acb;
263         uint8_t bus, dev_fun;
264         int error;
265
266         error = pci_enable_device(pdev);
267         if (error)
268                 goto out;
269         pci_set_master(pdev);
270
271         host = scsi_host_alloc(&arcmsr_scsi_host_template,
272                         sizeof(struct AdapterControlBlock));
273         if (!host) {
274                 error = -ENOMEM;
275                 goto out_disable_device;
276         }
277         acb = (struct AdapterControlBlock *)host->hostdata;
278         memset(acb, 0, sizeof (struct AdapterControlBlock));
279
280         error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
281         if (error) {
282                 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
283                 if (error) {
284                         printk(KERN_WARNING
285                                "scsi%d: No suitable DMA mask available\n",
286                                host->host_no);
287                         goto out_host_put;
288                 }
289         }
290         bus = pdev->bus->number;
291         dev_fun = pdev->devfn;
292         acb->host = host;
293         acb->pdev = pdev;
294         host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
295         host->max_lun = ARCMSR_MAX_TARGETLUN;
296         host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
297         host->max_cmd_len = 16;    /*this is issue of 64bit LBA, over 2T byte*/
298         host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
299         host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
300         host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
301         host->this_id = ARCMSR_SCSI_INITIATOR_ID;
302         host->unique_id = (bus << 8) | dev_fun;
303         host->irq = pdev->irq;
304         error = pci_request_regions(pdev, "arcmsr");
305         if (error)
306                 goto out_host_put;
307
308         acb->pmu = ioremap(pci_resource_start(pdev, 0),
309                            pci_resource_len(pdev, 0));
310         if (!acb->pmu) {
311                 printk(KERN_NOTICE "arcmsr%d: memory"
312                         " mapping region failed\n", acb->host->host_no);
                error = -ENOMEM;
313                 goto out_release_regions;
314         }
315         acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
316                            ACB_F_MESSAGE_RQBUFFER_CLEARED |
317                            ACB_F_MESSAGE_WQBUFFER_READED);
318         acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
319         INIT_LIST_HEAD(&acb->ccb_free_list);
320
321         error = arcmsr_alloc_ccb_pool(acb);
322         if (error)
323                 goto out_iounmap;
324
325         error = request_irq(pdev->irq, arcmsr_do_interrupt,
326                         SA_INTERRUPT | SA_SHIRQ, "arcmsr", acb);
327         if (error)
328                 goto out_free_ccb_pool;
329
330         arcmsr_iop_init(acb);
331         pci_set_drvdata(pdev, host);
332
333         error = scsi_add_host(host, &pdev->dev);
334         if (error)
335                 goto out_free_irq;
336
337         error = arcmsr_alloc_sysfs_attr(acb);
338         if (error)
339                 goto out_free_sysfs;
340
341         scsi_scan_host(host);
342         return 0;
343  out_free_sysfs:
344  out_free_irq:
345         free_irq(pdev->irq, acb);
346  out_free_ccb_pool:
347         arcmsr_free_ccb_pool(acb);
348  out_iounmap:
349         iounmap(acb->pmu);
350  out_release_regions:
351         pci_release_regions(pdev);
352  out_host_put:
353         scsi_host_put(host);
354  out_disable_device:
355         pci_disable_device(pdev);
356  out:
357         return error;
358 }
359
360 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
361 {
362         struct MessageUnit __iomem *reg = acb->pmu;
363
364         writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
365         if (arcmsr_wait_msgint_ready(acb))
366                 printk(KERN_NOTICE
367                         "arcmsr%d: wait 'abort all outstanding command' timeout \n"
368                         , acb->host->host_no);
369 }
370
371 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
372 {
373         struct AdapterControlBlock *acb = ccb->acb;
374         struct scsi_cmnd *pcmd = ccb->pcmd;
375
376         if (pcmd->use_sg != 0) {
377                 struct scatterlist *sl;
378
379                 sl = (struct scatterlist *)pcmd->request_buffer;
380                 pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
381         }
382         else if (pcmd->request_bufflen != 0)
383                 pci_unmap_single(acb->pdev,
384                         pcmd->SCp.dma_handle,
385                         pcmd->request_bufflen, pcmd->sc_data_direction);
386 }
387
388 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
389 {
390         struct AdapterControlBlock *acb = ccb->acb;
391         struct scsi_cmnd *pcmd = ccb->pcmd;
392
393         arcmsr_pci_unmap_dma(ccb);
394         if (stand_flag == 1)
395                 atomic_dec(&acb->ccboutstandingcount);
396         ccb->startdone = ARCMSR_CCB_DONE;
397         ccb->ccb_flags = 0;
398         list_add_tail(&ccb->list, &acb->ccb_free_list);
399         pcmd->scsi_done(pcmd);
400 }
401
402 static void arcmsr_remove(struct pci_dev *pdev)
403 {
404         struct Scsi_Host *host = pci_get_drvdata(pdev);
405         struct AdapterControlBlock *acb =
406                 (struct AdapterControlBlock *) host->hostdata;
407         struct MessageUnit __iomem *reg = acb->pmu;
408         int poll_count = 0;
409
410         arcmsr_free_sysfs_attr(acb);
411         scsi_remove_host(host);
412         arcmsr_stop_adapter_bgrb(acb);
413         arcmsr_flush_adapter_cache(acb);
414         writel(readl(&reg->outbound_intmask) |
415                 ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
416                 &reg->outbound_intmask);
417         acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
418         acb->acb_flags &= ~ACB_F_IOP_INITED;
419
420         for (poll_count = 0; poll_count < 256; poll_count++) {
421                 if (!atomic_read(&acb->ccboutstandingcount))
422                         break;
423                 arcmsr_interrupt(acb);
424                 msleep(25);
425         }
426
427         if (atomic_read(&acb->ccboutstandingcount)) {
428                 int i;
429
430                 arcmsr_abort_allcmd(acb);
431                 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
432                         readl(&reg->outbound_queueport);
433                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
434                         struct CommandControlBlock *ccb = acb->pccb_pool[i];
435                         if (ccb->startdone == ARCMSR_CCB_START) {
436                                 ccb->startdone = ARCMSR_CCB_ABORTED;
437                                 ccb->pcmd->result = DID_ABORT << 16;
438                                 arcmsr_ccb_complete(ccb, 1);
439                         }
440                 }
441         }
442
443         free_irq(pdev->irq, acb);
444         iounmap(acb->pmu);
445         arcmsr_free_ccb_pool(acb);
446         pci_release_regions(pdev);
447
448         scsi_host_put(host);
449
450         pci_disable_device(pdev);
451         pci_set_drvdata(pdev, NULL);
452 }
453
454 static void arcmsr_shutdown(struct pci_dev *pdev)
455 {
456         struct Scsi_Host *host = pci_get_drvdata(pdev);
457         struct AdapterControlBlock *acb =
458                 (struct AdapterControlBlock *)host->hostdata;
459
460         arcmsr_stop_adapter_bgrb(acb);
461         arcmsr_flush_adapter_cache(acb);
462 }
463
464 static int arcmsr_module_init(void)
465 {
466         int error = 0;
467
468         error = pci_register_driver(&arcmsr_pci_driver);
469         return error;
470 }
471
472 static void arcmsr_module_exit(void)
473 {
474         pci_unregister_driver(&arcmsr_pci_driver);
475 }
476 module_init(arcmsr_module_init);
477 module_exit(arcmsr_module_exit);
478
479 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
480 {
481         struct MessageUnit __iomem *reg = acb->pmu;
482         u32 orig_mask = readl(&reg->outbound_intmask);
483
484         writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
485                         &reg->outbound_intmask);
486         return orig_mask;
487 }
488
489 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
490                 u32 orig_mask)
491 {
492         struct MessageUnit __iomem *reg = acb->pmu;
493         u32 mask;
494
495         mask = orig_mask & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
496                              ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
497         writel(mask, &reg->outbound_intmask);
498 }
499
500 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
501 {
502         struct MessageUnit __iomem *reg = acb->pmu;
503
504         writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
505         if (arcmsr_wait_msgint_ready(acb))
506                 printk(KERN_NOTICE
507                         "arcmsr%d: wait 'flush adapter cache' timeout \n"
508                         , acb->host->host_no);
509 }
510
511 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
512 {
513         struct scsi_cmnd *pcmd = ccb->pcmd;
514         struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
515
516         pcmd->result = DID_OK << 16;
517         if (sensebuffer) {
518                 int sense_data_length =
519                         sizeof (struct SENSE_DATA) < sizeof (pcmd->sense_buffer)
520                         ? sizeof (struct SENSE_DATA) : sizeof (pcmd->sense_buffer);
521                 memset(sensebuffer, 0, sizeof (pcmd->sense_buffer));
522                 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
523                 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
524                 sensebuffer->Valid = 1;
525         }
526 }
527
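/*
** poll outbound_intstatus for the message0 completion bit, sleeping about
** 10ms per iteration; returns 0 on success, 0xff after roughly 20 seconds
*/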
528 static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb)
529 {
530         struct MessageUnit __iomem *reg = acb->pmu;
531         uint32_t Index;
532         uint8_t Retries = 0x00;
533
534         do {
535                 for (Index = 0; Index < 100; Index++) {
536                         if (readl(&reg->outbound_intstatus)
537                                 & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
538                                 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT
539                                         , &reg->outbound_intstatus);
540                                 return 0x00;
541                         }
542                         msleep_interruptible(10);
543                 }/*max 1 seconds*/
544         } while (Retries++ < 20);/*max 20 sec*/
545         return 0xff;
546 }
547
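/*
** translate a scsi_cmnd into the controller's ARCMSR_CDB: copy the scsi cdb,
** dma-map the data buffer and emit SG32ENTRY or SG64ENTRY elements depending
** on whether the mapped address has non zero upper 32 bits. cdbs that grow
** past 256 bytes are flagged with ARCMSR_CDB_FLAG_SGL_BSIZE
*/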
548 static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
549         struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
550 {
551         struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
552         int8_t *psge = (int8_t *)&arcmsr_cdb->u;
553         uint32_t address_lo, address_hi;
554         int arccdbsize = 0x30;
555
556         ccb->pcmd = pcmd;
557         memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
558         arcmsr_cdb->Bus = 0;
559         arcmsr_cdb->TargetID = pcmd->device->id;
560         arcmsr_cdb->LUN = pcmd->device->lun;
561         arcmsr_cdb->Function = 1;
562         arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
563         arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
564         memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
565         if (pcmd->use_sg) {
566                 int length, sgcount, i, cdb_sgcount = 0;
567                 struct scatterlist *sl;
568
569                 /* Get Scatter Gather List from scsiport. */
570                 sl = (struct scatterlist *) pcmd->request_buffer;
571                 sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg,
572                                 pcmd->sc_data_direction);
573                 /* map stor port SG list to our iop SG List. */
574                 for (i = 0; i < sgcount; i++) {
575                         /* Get the physical address of the current data pointer */
576                         length = cpu_to_le32(sg_dma_len(sl));
577                         address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl)));
578                         address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl)));
579                         if (address_hi == 0) {
580                                 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
581
582                                 pdma_sg->address = address_lo;
583                                 pdma_sg->length = length;
584                                 psge += sizeof (struct SG32ENTRY);
585                                 arccdbsize += sizeof (struct SG32ENTRY);
586                         } else {
587                                 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
588
589                                 pdma_sg->addresshigh = address_hi;
590                                 pdma_sg->address = address_lo;
591                                 pdma_sg->length = length|IS_SG64_ADDR;
592                                 psge += sizeof (struct SG64ENTRY);
593                                 arccdbsize += sizeof (struct SG64ENTRY);
594                         }
595                         sl++;
596                         cdb_sgcount++;
597                 }
598                 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
599                 arcmsr_cdb->DataLength = pcmd->request_bufflen;
600                 if (arccdbsize > 256)
601                         arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
602         } else if (pcmd->request_bufflen) {
603                 dma_addr_t dma_addr;
604                 dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
605                                 pcmd->request_bufflen, pcmd->sc_data_direction);
606                 pcmd->SCp.dma_handle = dma_addr;
607                 address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
608                 address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
609                 if (address_hi == 0) {
610                         struct  SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
611                         pdma_sg->address = address_lo;
612                         pdma_sg->length = pcmd->request_bufflen;
613                 } else {
614                         struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
615                         pdma_sg->addresshigh = address_hi;
616                         pdma_sg->address = address_lo;
617                         pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
618                 }
619                 arcmsr_cdb->sgcount = 1;
620                 arcmsr_cdb->DataLength = pcmd->request_bufflen;
621         }
622         if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
623                 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
624                 ccb->ccb_flags |= CCB_FLAG_WRITE;
625         }
626 }
627
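/*
** hand a built ccb to the iop by writing its 32 byte shifted physical
** address to the inbound queue port, with ARCMSR_CCBPOST_FLAG_SGL_BSIZE
** ored in for oversized scatter-gather lists
*/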
628 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
629 {
630         struct MessageUnit __iomem *reg = acb->pmu;
631         uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
632         struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
633
634         atomic_inc(&acb->ccboutstandingcount);
635         ccb->startdone = ARCMSR_CCB_START;
636         if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
637                 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
638                         &reg->inbound_queueport);
639         else
640                 writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
641 }
642
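/*
** copy up to 124 bytes from the driver's wqbuffer ring into the iop's
** message_wbuffer and ring the inbound doorbell, provided the iop has
** consumed the previous buffer (ACB_F_MESSAGE_WQBUFFER_READED)
*/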
643 void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb)
644 {
645         struct MessageUnit __iomem *reg = acb->pmu;
646         struct QBUFFER __iomem *pwbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
647         uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data;
648         int32_t allxfer_len = 0;
649
650         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
651                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
652                 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
653                         && (allxfer_len < 124)) {
654                         writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
655                         acb->wqbuf_firstindex++;
656                         acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
657                         iop_data++;
658                         allxfer_len++;
659                 }
660                 writel(allxfer_len, &pwbuffer->data_len);
661                 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK
662                         , &reg->inbound_doorbell);
663         }
664 }
665
666 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
667 {
668         struct MessageUnit __iomem *reg = acb->pmu;
669
670         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
671         writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
672         if (arcmsr_wait_msgint_ready(acb))
673                 printk(KERN_NOTICE
674                         "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
675                         , acb->host->host_no);
676 }
677
678 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
679 {
680         dma_free_coherent(&acb->pdev->dev,
681                 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
682                 acb->dma_coherent,
683                 acb->dma_coherent_handle);
684 }
685
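/*
** main interrupt service routine, called with the host lock held. Doorbell
** interrupts move message data between the driver's rqbuffer/wqbuffer rings
** and the iop's message buffers; post queue interrupts drain completed ccb
** addresses from the outbound queue port and complete the scsi commands
*/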
686 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
687 {
688         struct MessageUnit __iomem *reg = acb->pmu;
689         struct CommandControlBlock *ccb;
690         uint32_t flag_ccb, outbound_intstatus, outbound_doorbell;
691
692         outbound_intstatus = readl(&reg->outbound_intstatus)
693                 & acb->outbound_int_enable;
694         writel(outbound_intstatus, &reg->outbound_intstatus);
695         if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
696                 outbound_doorbell = readl(&reg->outbound_doorbell);
697                 writel(outbound_doorbell, &reg->outbound_doorbell);
698                 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
699                         struct QBUFFER __iomem * prbuffer =
700                                 (struct QBUFFER __iomem *) &reg->message_rbuffer;
701                         uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
702                         int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
703
704                         rqbuf_lastindex = acb->rqbuf_lastindex;
705                         rqbuf_firstindex = acb->rqbuf_firstindex;
706                         iop_len = readl(&prbuffer->data_len);
707                         my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1)
708                                         &(ARCMSR_MAX_QBUFFER - 1);
709                         if (my_empty_len >= iop_len) {
710                                 while (iop_len > 0) {
711                                         acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
712                                         acb->rqbuf_lastindex++;
713                                         acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
714                                         iop_data++;
715                                         iop_len--;
716                                 }
717                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
718                                         &reg->inbound_doorbell);
719                         } else
720                                 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
721                 }
722                 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
723                         acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
724                         if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
725                                 struct QBUFFER __iomem * pwbuffer =
726                                                 (struct QBUFFER __iomem *) &reg->message_wbuffer;
727                                 uint8_t __iomem * iop_data = (uint8_t __iomem *) pwbuffer->data;
728                                 int32_t allxfer_len = 0;
729
730                                 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
731                                 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
732                                         && (allxfer_len < 124)) {
733                                         writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
734                                         acb->wqbuf_firstindex++;
735                                         acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
736                                         iop_data++;
737                                         allxfer_len++;
738                                 }
739                                 writel(allxfer_len, &pwbuffer->data_len);
740                                 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK,
741                                         &reg->inbound_doorbell);
742                         }
743                         if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
744                                 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
745                 }
746         }
747         if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
748                 int id, lun;
749                 /*
750                 ****************************************************************
751                 **               areca cdb command done
752                 ****************************************************************
753                 */
754                 while (1) {
755                         if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF)
756                                 break;/*chip FIFO no ccb for completion already*/
757                         /* check if command done with no error*/
758                         ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
759                                 (flag_ccb << 5));
760                         if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
761                                 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
762                                         struct scsi_cmnd *abortcmd = ccb->pcmd;
763                                         if (abortcmd) {
764                                                 abortcmd->result |= DID_ABORT << 16;
765                                                 arcmsr_ccb_complete(ccb, 1);
766                                                 printk(KERN_NOTICE
767                                                         "arcmsr%d: ccb='0x%p' isr got aborted command \n"
768                                                         , acb->host->host_no, ccb);
769                                         }
770                                         continue;
771                                 }
772                                 printk(KERN_NOTICE
773                                         "arcmsr%d: isr get an illegal ccb command done acb='0x%p'"
774                                         "ccb='0x%p' ccbacb='0x%p' startdone = 0x%x"
775                                         " ccboutstandingcount=%d \n"
776                                         , acb->host->host_no
777                                         , acb
778                                         , ccb
779                                         , ccb->acb
780                                         , ccb->startdone
781                                         , atomic_read(&acb->ccboutstandingcount));
782                                 continue;
783                         }
784                         id = ccb->pcmd->device->id;
785                         lun = ccb->pcmd->device->lun;
786                         if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
787                                 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
788                                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
789                                 ccb->pcmd->result = DID_OK << 16;
790                                 arcmsr_ccb_complete(ccb, 1);
791                         } else {
792                                 switch(ccb->arcmsr_cdb.DeviceStatus) {
793                                 case ARCMSR_DEV_SELECT_TIMEOUT: {
794                                                 acb->devstate[id][lun] = ARECA_RAID_GONE;
795                                                 ccb->pcmd->result = DID_TIME_OUT << 16;
796                                                 arcmsr_ccb_complete(ccb, 1);
797                                         }
798                                         break;
799                                 case ARCMSR_DEV_ABORTED:
800                                 case ARCMSR_DEV_INIT_FAIL: {
801                                                 acb->devstate[id][lun] = ARECA_RAID_GONE;
802                                                 ccb->pcmd->result = DID_BAD_TARGET << 16;
803                                                 arcmsr_ccb_complete(ccb, 1);
804                                         }
805                                         break;
806                                 case ARCMSR_DEV_CHECK_CONDITION: {
807                                                 acb->devstate[id][lun] = ARECA_RAID_GOOD;
808                                                 arcmsr_report_sense_info(ccb);
809                                                 arcmsr_ccb_complete(ccb, 1);
810                                         }
811                                         break;
812                                 default:
813                                         printk(KERN_NOTICE
814                                                 "arcmsr%d: scsi id=%d lun=%d"
815                                                 " isr get command error done,"
816                                                 "but got unknown DeviceStatus = 0x%x \n"
817                                                 , acb->host->host_no
818                                                 , id
819                                                 , lun
820                                                 , ccb->arcmsr_cdb.DeviceStatus);
821                                                 acb->devstate[id][lun] = ARECA_RAID_GONE;
822                                                 ccb->pcmd->result = DID_NO_CONNECT << 16;
823                                                 arcmsr_ccb_complete(ccb, 1);
824                                         break;
825                                 }
826                         }
827                 }/*drain reply FIFO*/
828         }
829         if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
830                 return IRQ_NONE;
831         return IRQ_HANDLED;
832 }
833
834 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
835 {
836         if (acb) {
837                 /* stop adapter background rebuild */
838                 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
839                         acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
840                         arcmsr_stop_adapter_bgrb(acb);
841                         arcmsr_flush_adapter_cache(acb);
842                 }
843         }
844 }
845
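/*
** Areca's pass-through message interface, carried over scsi WRITE_BUFFER
** and READ_BUFFER commands to the virtual device: the io control code is
** taken from cdb bytes 5..8 and the payload is a struct CMD_MESSAGE_FIELD
*/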
846 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd)
847 {
848         struct MessageUnit __iomem *reg = acb->pmu;
849         struct CMD_MESSAGE_FIELD *pcmdmessagefld;
850         int retvalue = 0, transfer_len = 0;
851         char *buffer;
852         uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
853                                                 (uint32_t ) cmd->cmnd[6] << 16 |
854                                                 (uint32_t ) cmd->cmnd[7] << 8  |
855                                                 (uint32_t ) cmd->cmnd[8];
856                                         /* 4 bytes: Areca io control code */
857         if (cmd->use_sg) {
858                 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
859
860                 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
861                 if (cmd->use_sg > 1) {
862                         retvalue = ARCMSR_MESSAGE_FAIL;
863                         goto message_out;
864                 }
865                 transfer_len += sg->length;
866         } else {
867                 buffer = cmd->request_buffer;
868                 transfer_len = cmd->request_bufflen;
869         }
870         if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
871                 retvalue = ARCMSR_MESSAGE_FAIL;
872                 goto message_out;
873         }
874         pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
875         switch(controlcode) {
876         case ARCMSR_MESSAGE_READ_RQBUFFER: {
877                         unsigned long *ver_addr;
878                         dma_addr_t buf_handle;
879                         uint8_t *pQbuffer, *ptmpQbuffer;
880                         int32_t allxfer_len = 0;
881
882                         ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
883                         if (!ver_addr) {
884                                 retvalue = ARCMSR_MESSAGE_FAIL;
885                                 goto message_out;
886                         }
887                         ptmpQbuffer = (uint8_t *) ver_addr;
888                         while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
889                                 && (allxfer_len < 1031)) {
890                                 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
891                                 memcpy(ptmpQbuffer, pQbuffer, 1);
892                                 acb->rqbuf_firstindex++;
893                                 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
894                                 ptmpQbuffer++;
895                                 allxfer_len++;
896                         }
897                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
898                                 struct QBUFFER __iomem * prbuffer = (struct QBUFFER __iomem *)
899                                                         &reg->message_rbuffer;
900                                 uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
901                                 int32_t iop_len;
902
903                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
904                                 iop_len = readl(&prbuffer->data_len);
905                                 while (iop_len > 0) {
906                                         acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
907                                         acb->rqbuf_lastindex++;
908                                         acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
909                                         iop_data++;
910                                         iop_len--;
911                                 }
912                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
913                                                 &reg->inbound_doorbell);
914                         }
915                         memcpy(pcmdmessagefld->messagedatabuffer,
916                                 (uint8_t *)ver_addr, allxfer_len);
917                         pcmdmessagefld->cmdmessage.Length = allxfer_len;
918                         pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
919                         pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
920                 }
921                 break;
922         case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
923                         unsigned long *ver_addr;
924                         dma_addr_t buf_handle;
925                         int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
926                         uint8_t *pQbuffer, *ptmpuserbuffer;
927
928                         ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
929                         if (!ver_addr) {
930                                 retvalue = ARCMSR_MESSAGE_FAIL;
931                                 goto message_out;
932                         }
933                         ptmpuserbuffer = (uint8_t *)ver_addr;
934                         user_len = pcmdmessagefld->cmdmessage.Length;
935                         memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
936                         wqbuf_lastindex = acb->wqbuf_lastindex;
937                         wqbuf_firstindex = acb->wqbuf_firstindex;
938                         if (wqbuf_lastindex != wqbuf_firstindex) {
939                                 struct SENSE_DATA *sensebuffer =
940                                         (struct SENSE_DATA *)cmd->sense_buffer;
941                                 arcmsr_post_Qbuffer(acb);
942                                 /* has error report sensedata */
943                                 sensebuffer->ErrorCode = 0x70;
944                                 sensebuffer->SenseKey = ILLEGAL_REQUEST;
945                                 sensebuffer->AdditionalSenseLength = 0x0A;
946                                 sensebuffer->AdditionalSenseCode = 0x20;
947                                 sensebuffer->Valid = 1;
948                                 retvalue = ARCMSR_MESSAGE_FAIL;
949                         } else {
950                                 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
951                                                 &(ARCMSR_MAX_QBUFFER - 1);
952                                 if (my_empty_len >= user_len) {
953                                         while (user_len > 0) {
954                                                 pQbuffer =
955                                                 &acb->wqbuffer[acb->wqbuf_lastindex];
956                                                 memcpy(pQbuffer, ptmpuserbuffer, 1);
957                                                 acb->wqbuf_lastindex++;
958                                                 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
959                                                 ptmpuserbuffer++;
960                                                 user_len--;
961                                         }
962                                         if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
963                                                 acb->acb_flags &=
964                                                         ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
965                                                 arcmsr_post_Qbuffer(acb);
966                                         }
967                                 } else {
968                                         /* has error report sensedata */
969                                         struct SENSE_DATA *sensebuffer =
970                                                 (struct SENSE_DATA *)cmd->sense_buffer;
971                                         sensebuffer->ErrorCode = 0x70;
972                                         sensebuffer->SenseKey = ILLEGAL_REQUEST;
973                                         sensebuffer->AdditionalSenseLength = 0x0A;
974                                         sensebuffer->AdditionalSenseCode = 0x20;
975                                         sensebuffer->Valid = 1;
976                                         retvalue = ARCMSR_MESSAGE_FAIL;
977                                 }
978                         }
979                         pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
980                 }
981                 break;
982         case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
983                         uint8_t *pQbuffer = acb->rqbuffer;
984
985                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
986                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
987                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
988                                         &reg->inbound_doorbell);
989                         }
990                         acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
991                         acb->rqbuf_firstindex = 0;
992                         acb->rqbuf_lastindex = 0;
993                         memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
994                         pcmdmessagefld->cmdmessage.ReturnCode =
995                                 ARCMSR_MESSAGE_RETURNCODE_OK;
996                 }
997                 break;
998         case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
999                         uint8_t *pQbuffer = acb->wqbuffer;
1000
1001                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1002                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1003                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1004                                                 , &reg->inbound_doorbell);
1005                         }
1006                         acb->acb_flags |=
1007                                 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1008                                         ACB_F_MESSAGE_WQBUFFER_READED);
1009                         acb->wqbuf_firstindex = 0;
1010                         acb->wqbuf_lastindex = 0;
1011                         memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1012                         pcmdmessagefld->cmdmessage.ReturnCode =
1013                                 ARCMSR_MESSAGE_RETURNCODE_OK;
1014                 }
1015                 break;
1016         case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1017                         uint8_t *pQbuffer;
1018
1019                         if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1020                                 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1021                                 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
1022                                                 , &reg->inbound_doorbell);
1023                         }
1024                         acb->acb_flags |=
1025                                 (ACB_F_MESSAGE_WQBUFFER_CLEARED
1026                                 | ACB_F_MESSAGE_RQBUFFER_CLEARED
1027                                 | ACB_F_MESSAGE_WQBUFFER_READED);
1028                         acb->rqbuf_firstindex = 0;
1029                         acb->rqbuf_lastindex = 0;
1030                         acb->wqbuf_firstindex = 0;
1031                         acb->wqbuf_lastindex = 0;
1032                         pQbuffer = acb->rqbuffer;
1033                         memset(pQbuffer, 0, sizeof (struct QBUFFER));
1034                         pQbuffer = acb->wqbuffer;
1035                         memset(pQbuffer, 0, sizeof (struct QBUFFER));
1036                         pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1037                 }
1038                 break;
1039         case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1040                         pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1041                 }
1042                 break;
1043         case ARCMSR_MESSAGE_SAY_HELLO: {
1044                         int8_t * hello_string = "Hello! I am ARCMSR";
1045
1046                         memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1047                                 , (int16_t)strlen(hello_string));
1048                         pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1049                 }
1050                 break;
1051         case ARCMSR_MESSAGE_SAY_GOODBYE:
1052                 arcmsr_iop_parking(acb);
1053                 break;
1054         case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1055                 arcmsr_flush_adapter_cache(acb);
1056                 break;
1057         default:
1058                 retvalue = ARCMSR_MESSAGE_FAIL;
1059         }
1060  message_out:
1061         if (cmd->use_sg) {
1062                 struct scatterlist *sg;
1063
1064                 sg = (struct scatterlist *) cmd->request_buffer;
1065                 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1066         }
1067         return retvalue;
1068 }
1069
1070 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1071 {
1072         struct list_head *head = &acb->ccb_free_list;
1073         struct CommandControlBlock *ccb = NULL;
1074
1075         if (!list_empty(head)) {
1076                 ccb = list_entry(head->next, struct CommandControlBlock, list);
1077                 list_del(head->next);
1078         }
1079         return ccb;
1080 }
1081
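/*
** commands addressed to the virtual target (scsi id 16) never reach the iop:
** INQUIRY is emulated here and READ/WRITE BUFFER are routed to
** arcmsr_iop_message_xfer()
*/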
1082 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1083                 struct scsi_cmnd *cmd)
1084 {
1085         switch (cmd->cmnd[0]) {
1086         case INQUIRY: {
1087                 unsigned char inqdata[36];
1088                 char *buffer;
1089
1090                 if (cmd->device->lun) {
1091                         cmd->result = (DID_TIME_OUT << 16);
1092                         cmd->scsi_done(cmd);
1093                         return;
1094                 }
1095                 inqdata[0] = TYPE_PROCESSOR;
1096                 /* Periph Qualifier & Periph Dev Type */
1097                 inqdata[1] = 0;
1098                 /* rem media bit & Dev Type Modifier */
1099                 inqdata[2] = 0;
1100                 /* ISO,ECMA,& ANSI versions */
1101                 inqdata[4] = 31;
1102                 /* length of additional data */
1103                 strncpy(&inqdata[8], "Areca   ", 8);
1104                 /* Vendor Identification */
1105                 strncpy(&inqdata[16], "RAID controller ", 16);
1106                 /* Product Identification */
1107                 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1108                 if (cmd->use_sg) {
1109                         struct scatterlist *sg;
1110
1111                         sg = (struct scatterlist *) cmd->request_buffer;
1112                         buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1113                 } else {
1114                         buffer = cmd->request_buffer;
1115                 }
1116                 memcpy(buffer, inqdata, sizeof(inqdata));
1117                 if (cmd->use_sg) {
1118                         struct scatterlist *sg;
1119
1120                         sg = (struct scatterlist *) cmd->request_buffer;
1121                         kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1122                 }
1123                 cmd->scsi_done(cmd);
1124         }
1125         break;
1126         case WRITE_BUFFER:
1127         case READ_BUFFER: {
1128                 if (arcmsr_iop_message_xfer(acb, cmd))
1129                         cmd->result = (DID_ERROR << 16);
1130                 cmd->scsi_done(cmd);
1131         }
1132         break;
1133         default:
1134                 cmd->scsi_done(cmd);
1135         }
1136 }
1137
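/*
** queuecommand entry point: return busy while a bus reset is pending,
** divert the virtual target, fail reads/writes to volumes marked
** ARECA_RAID_GONE with DID_NO_CONNECT, otherwise grab a free ccb,
** build it and post it to the iop
*/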
1138 static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1139         void (* done)(struct scsi_cmnd *))
1140 {
1141         struct Scsi_Host *host = cmd->device->host;
1142         struct AdapterControlBlock *acb =
1143                 (struct AdapterControlBlock *) host->hostdata;
1144         struct CommandControlBlock *ccb;
1145         int target = cmd->device->id;
1146         int lun = cmd->device->lun;
1147
1148         cmd->scsi_done = done;
1149         cmd->host_scribble = NULL;
1150         cmd->result = 0;
1151         if (acb->acb_flags & ACB_F_BUS_RESET) {
1152                 printk(KERN_NOTICE "arcmsr%d: bus reset in progress,"
1153                         " returning busy\n"
1154                         , acb->host->host_no);
1155                 return SCSI_MLQUEUE_HOST_BUSY;
1156         }
1157         if (target == 16) {
1158                 /* virtual device for iop message transfer */
1159                 arcmsr_handle_virtual_command(acb, cmd);
1160                 return 0;
1161         }
1162         if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1163                 uint8_t block_cmd;
1164
1165                 block_cmd = cmd->cmnd[0] & 0x0f;
1166                 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1167                         printk(KERN_NOTICE
1168                                 "arcmsr%d: blocking 'read/write'"
1169                                 " command to gone raid volume,"
1170                                 " Cmd=%2x, TargetId=%d, Lun=%d\n"
1171                                 , acb->host->host_no
1172                                 , cmd->cmnd[0]
1173                                 , target, lun);
1174                         cmd->result = (DID_NO_CONNECT << 16);
1175                         cmd->scsi_done(cmd);
1176                         return 0;
1177                 }
1178         }
1179         if (atomic_read(&acb->ccboutstandingcount) >=
1180                         ARCMSR_MAX_OUTSTANDING_CMD)
1181                 return SCSI_MLQUEUE_HOST_BUSY;
1182
1183         ccb = arcmsr_get_freeccb(acb);
1184         if (!ccb)
1185                 return SCSI_MLQUEUE_HOST_BUSY;
1186         arcmsr_build_ccb(acb, ccb, cmd);
1187         arcmsr_post_ccb(acb, ccb);
1188         return 0;
1189 }
1190
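/*
** Issue the 'get config' message to the IOP and copy the firmware model and
** version strings plus the request length, queue depth, SDRAM size and
** hd-channel count out of the message ring buffer.
*/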
1191 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
1192 {
1193         struct MessageUnit __iomem *reg = acb->pmu;
1194         char *acb_firm_model = acb->firm_model;
1195         char *acb_firm_version = acb->firm_version;
1196         char __iomem *iop_firm_model = (char __iomem *) &reg->message_rwbuffer[15];
1197         char __iomem *iop_firm_version = (char __iomem *) &reg->message_rwbuffer[17];
1198         int count;
1199
1200         writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1201         if (arcmsr_wait_msgint_ready(acb))
1202                 printk(KERN_NOTICE
1203                         "arcmsr%d: wait "
1204                         "'get adapter firmware miscellaneous data' timeout \n"
1205                         , acb->host->host_no);
1206         count = 8;
1207         while (count) {
1208                 *acb_firm_model = readb(iop_firm_model);
1209                 acb_firm_model++;
1210                 iop_firm_model++;
1211                 count--;
1212         }
1213         count = 16;
1214         while (count) {
1215                 *acb_firm_version = readb(iop_firm_version);
1216                 acb_firm_version++;
1217                 iop_firm_version++;
1218                 count--;
1219         }
1220         printk(KERN_INFO
1221                 "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1222                 , acb->host->host_no
1223                 , acb->firm_version);
1224         acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1225         acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1226         acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1227         acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1228 }
1229
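/*
** Poll the outbound post queue for completed ccbs without relying on the
** interrupt handler (used while aborting a command).  Gives up after about
** 100 retries of 25ms each with nothing pending; normal completions get
** DID_OK, failed ones a result derived from the ccb's DeviceStatus, and a
** ccb that was aborted (or is poll_ccb itself) is completed with DID_ABORT.
*/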
1230 static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
1231         struct CommandControlBlock *poll_ccb)
1232 {
1233         struct MessageUnit __iomem *reg = acb->pmu;
1234         struct CommandControlBlock *ccb;
1235         uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
1236         int id, lun;
1237
1238  polling_ccb_retry:
1239         poll_count++;
1240         outbound_intstatus = readl(&reg->outbound_intstatus)
1241                                         & acb->outbound_int_enable;
1242         writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
1243         while (1) {
1244                 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
1245                         if (poll_ccb_done)
1246                                 break;
1247                         else {
1248                                 msleep(25);
1249                                 if (poll_count > 100)
1250                                         break;
1251                                 goto polling_ccb_retry;
1252                         }
1253                 }
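                /*
                ** flag_ccb is presumably the ccb's bus address shifted right
                ** by 5; adding vir2phy_offset maps it back to the driver's
                ** virtual CommandControlBlock pointer.
                */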
1254                 ccb = (struct CommandControlBlock *)
1255                         (acb->vir2phy_offset + (flag_ccb << 5));
1256                 if ((ccb->acb != acb) ||
1257                         (ccb->startdone != ARCMSR_CCB_START)) {
1258                         if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
1259                                 (ccb == poll_ccb)) {
1260                                 printk(KERN_NOTICE
1261                                         "arcmsr%d: scsi id=%d lun=%d ccb='0x%p'"
1262                                         " poll command aborted successfully\n"
1263                                         , acb->host->host_no
1264                                         , ccb->pcmd->device->id
1265                                         , ccb->pcmd->device->lun
1266                                         , ccb);
1267                                 ccb->pcmd->result = DID_ABORT << 16;
1268                                 arcmsr_ccb_complete(ccb, 1);
1269                                 poll_ccb_done = 1;
1270                                 continue;
1271                         }
1272                         printk(KERN_NOTICE
1273                                 "arcmsr%d: polling got an illegal ccb"
1274                                 " command done, ccb='0x%p',"
1275                                 " ccboutstandingcount=%d\n"
1276                                 , acb->host->host_no
1277                                 , ccb
1278                                 , atomic_read(&acb->ccboutstandingcount));
1279                         continue;
1280                 }
1281                 id = ccb->pcmd->device->id;
1282                 lun = ccb->pcmd->device->lun;
1283                 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
1284                         if (acb->devstate[id][lun] == ARECA_RAID_GONE)
1285                                 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1286                         ccb->pcmd->result = DID_OK << 16;
1287                         arcmsr_ccb_complete(ccb, 1);
1288                 } else {
1289                 switch (ccb->arcmsr_cdb.DeviceStatus) {
1290                         case ARCMSR_DEV_SELECT_TIMEOUT: {
1291                                         acb->devstate[id][lun] = ARECA_RAID_GONE;
1292                                         ccb->pcmd->result = DID_TIME_OUT << 16;
1293                                         arcmsr_ccb_complete(ccb, 1);
1294                                 }
1295                                 break;
1296                         case ARCMSR_DEV_ABORTED:
1297                         case ARCMSR_DEV_INIT_FAIL: {
1298                                         acb->devstate[id][lun] = ARECA_RAID_GONE;
1299                                         ccb->pcmd->result = DID_BAD_TARGET << 16;
1300                                         arcmsr_ccb_complete(ccb, 1);
1301                                 }
1302                                 break;
1303                         case ARCMSR_DEV_CHECK_CONDITION: {
1304                                         acb->devstate[id][lun] = ARECA_RAID_GOOD;
1305                                         arcmsr_report_sense_info(ccb);
1306                                         arcmsr_ccb_complete(ccb, 1);
1307                                 }
1308                                 break;
1309                         default:
1310                                 printk(KERN_NOTICE
1311                                         "arcmsr%d: scsi id=%d lun=%d"
1312                                         " polling found a command done with error,"
1313                                         " but unknown DeviceStatus = 0x%x\n"
1314                                         , acb->host->host_no
1315                                         , id
1316                                         , lun
1317                                         , ccb->arcmsr_cdb.DeviceStatus);
1318                                 acb->devstate[id][lun] = ARECA_RAID_GONE;
1319                                 ccb->pcmd->result = DID_BAD_TARGET << 16;
1320                                 arcmsr_ccb_complete(ccb, 1);
1321                                 break;
1322                         }
1323                 }
1324         }
1325 }
1326
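/*
** Bring the IOP online: wait for the firmware-ok flag, read the firmware
** specification, start background rebuild, acknowledge any stale doorbell
** bits and finally unmask the post-queue and doorbell interrupts.
*/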
1327 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
1328 {
1329         struct MessageUnit __iomem *reg = acb->pmu;
1330         uint32_t intmask_org, mask, outbound_doorbell, firmware_state = 0;
1331
1332         do {
1333                 firmware_state = readl(&reg->outbound_msgaddr1);
1334         } while (!(firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK));
1335         intmask_org = readl(&reg->outbound_intmask)
1336                         | ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
1337         arcmsr_get_firmware_spec(acb);
1338
1339         acb->acb_flags |= ACB_F_MSG_START_BGRB;
1340         writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
1341         if (arcmsr_wait_msgint_ready(acb)) {
1342                 printk(KERN_NOTICE "arcmsr%d: "
1343                         "wait 'start adapter background rebuild' timeout\n",
1344                         acb->host->host_no);
1345         }
1346
1347         outbound_doorbell = readl(&reg->outbound_doorbell);
1348         writel(outbound_doorbell, &reg->outbound_doorbell);
1349         writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1350         mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE
1351                         | ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
1352         writel(intmask_org & mask, &reg->outbound_intmask);
1353         acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
1354         acb->acb_flags |= ACB_F_IOP_INITED;
1355 }
1356
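/*
** Clean up everything the IOP still owns: ask the firmware to abort all
** outstanding commands, wait, then with outbound interrupts masked drain
** the outbound post queue and complete every ccb still marked started or
** aborted with DID_ABORT before zeroing the outstanding count.
*/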
1357 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
1358 {
1359         struct MessageUnit __iomem *reg = acb->pmu;
1360         struct CommandControlBlock *ccb;
1361         uint32_t intmask_org;
1362         int i = 0;
1363
1364         if (atomic_read(&acb->ccboutstandingcount) != 0) {
1365                 /* tell iop 331 to abort all outstanding commands */
1366                 arcmsr_abort_allcmd(acb);
1367                 /* wait up to 3 seconds for the aborts to complete */
1368                 msleep_interruptible(3000);
1369                 /* disable all outbound interrupt */
1370                 intmask_org = arcmsr_disable_outbound_ints(acb);
1371                 /* clear all outbound posted Q */
1372                 for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
1373                         readl(&reg->outbound_queueport);
1374                 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1375                         ccb = acb->pccb_pool[i];
1376                         if ((ccb->startdone == ARCMSR_CCB_START) ||
1377                                 (ccb->startdone == ARCMSR_CCB_ABORTED)) {
1378                                 ccb->startdone = ARCMSR_CCB_ABORTED;
1379                                 ccb->pcmd->result = DID_ABORT << 16;
1380                                 arcmsr_ccb_complete(ccb, 1);
1381                         }
1382                 }
1383                 /* enable all outbound interrupt */
1384                 arcmsr_enable_outbound_ints(acb, intmask_org);
1385         }
1386         atomic_set(&acb->ccboutstandingcount, 0);
1387 }
1388
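/*
** eh_bus_reset_handler: flag the reset so queuecommand returns busy, give
** outstanding commands up to 400 polls of 25ms each to drain by calling the
** interrupt service routine directly, then reset the IOP state.
*/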
1389 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
1390 {
1391         struct AdapterControlBlock *acb =
1392                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1393         int i;
1394
1395         acb->num_resets++;
1396         acb->acb_flags |= ACB_F_BUS_RESET;
1397         for (i = 0; i < 400; i++) {
1398                 if (!atomic_read(&acb->ccboutstandingcount))
1399                         break;
1400                 arcmsr_interrupt(acb);
1401                 msleep(25);
1402         }
1403         arcmsr_iop_reset(acb);
1404         acb->acb_flags &= ~ACB_F_BUS_RESET;
1405         return SUCCESS;
1406 }
1407
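/*
** Mark a single ccb as aborted, give the firmware time to finish it, then
** poll the outbound queue with interrupts masked so the completion is
** reaped even if the interrupt handler cannot run.
*/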
1408 static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
1409                 struct CommandControlBlock *ccb)
1410 {
1411         u32 intmask;
1412
1413         ccb->startdone = ARCMSR_CCB_ABORTED;
1414
1415         /*
1416         ** Wait 3 seconds for outstanding commands to complete.
1417         */
1418         msleep_interruptible(3000);
1419
1420         intmask = arcmsr_disable_outbound_ints(acb);
1421         arcmsr_polling_ccbdone(acb, ccb);
1422         arcmsr_enable_outbound_ints(acb, intmask);
1423 }
1424
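/*
** eh_abort_handler: find the ccb that carries this scsi_cmnd and try to
** abort it.  Always returns SUCCESS; with nothing outstanding there is
** nothing to abort.
*/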
1425 static int arcmsr_abort(struct scsi_cmnd *cmd)
1426 {
1427         struct AdapterControlBlock *acb =
1428                 (struct AdapterControlBlock *)cmd->device->host->hostdata;
1429         int i = 0;
1430
1431         printk(KERN_NOTICE
1432                 "arcmsr%d: abort device command of scsi id=%d lun=%d \n",
1433                 acb->host->host_no, cmd->device->id, cmd->device->lun);
1434         acb->num_aborts++;
1435
1436         /*
1437         ************************************************
1438         ** the interrupt service routines are locked at this point,
1439         ** so handle the abort as quickly as possible and return
1440         ************************************************
1441         */
1442         if (!atomic_read(&acb->ccboutstandingcount))
1443                 return SUCCESS;
1444
1445         for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
1446                 struct CommandControlBlock *ccb = acb->pccb_pool[i];
1447                 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
1448                         arcmsr_abort_one_cmd(acb, ccb);
1449                         break;
1450                 }
1451         }
1452
1453         return SUCCESS;
1454 }
1455
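/*
** Return a human-readable adapter description for the SCSI midlayer, built
** from the PCI device id (SATA or SAS family, RAID6 capability) and the
** driver version.  Note that the static buffer is shared by all adapters.
*/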
1456 static const char *arcmsr_info(struct Scsi_Host *host)
1457 {
1458         struct AdapterControlBlock *acb =
1459                 (struct AdapterControlBlock *) host->hostdata;
1460         static char buf[256];
1461         char *type;
1462         int raid6 = 1;
1463
1464         switch (acb->pdev->device) {
1465         case PCI_DEVICE_ID_ARECA_1110:
1466         case PCI_DEVICE_ID_ARECA_1210:
1467                 raid6 = 0;
1468                 /*FALLTHRU*/
1469         case PCI_DEVICE_ID_ARECA_1120:
1470         case PCI_DEVICE_ID_ARECA_1130:
1471         case PCI_DEVICE_ID_ARECA_1160:
1472         case PCI_DEVICE_ID_ARECA_1170:
1473         case PCI_DEVICE_ID_ARECA_1220:
1474         case PCI_DEVICE_ID_ARECA_1230:
1475         case PCI_DEVICE_ID_ARECA_1260:
1476         case PCI_DEVICE_ID_ARECA_1270:
1477         case PCI_DEVICE_ID_ARECA_1280:
1478                 type = "SATA";
1479                 break;
1480         case PCI_DEVICE_ID_ARECA_1380:
1481         case PCI_DEVICE_ID_ARECA_1381:
1482         case PCI_DEVICE_ID_ARECA_1680:
1483         case PCI_DEVICE_ID_ARECA_1681:
1484                 type = "SAS";
1485                 break;
1486         default:
1487                 type = "X-TYPE";
1488                 break;
1489         }
1490         sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n        %s",
1491                         type, raid6 ? " (RAID6 capable)" : "",
1492                         ARCMSR_DRIVER_VERSION);
1493         return buf;
1494 }
1495
1496