/*
 * HighPoint RR3xxx controller driver for Linux
 * Copyright (C) 2006 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"
MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx SATA Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx SATA Controller driver";
static const char driver_ver[] = "v1.0 (060426)";

static DEFINE_SPINLOCK(hptiop_hba_list_lock);
static LIST_HEAD(hptiop_hba_list);
static int hptiop_cdev_major = -1;

static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

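/*
 * Reading any MU register back forces the PCI bridge to flush posted
 * writes; the readl() below is done only for that side effect and its
 * return value is deliberately discarded.
 */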
static inline void hptiop_pci_posting_flush(struct hpt_iopmu __iomem *iop)
{
        readl(&iop->outbound_intstatus);
}

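/*
 * Poll up to @millisec ms for the IOP firmware to post a free request
 * slot on the inbound queue; a non-empty read is the firmware's
 * "ready" handshake. The borrowed slot is handed straight back via the
 * outbound queue.
 */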
static int iop_wait_ready(struct hpt_iopmu __iomem *iop, u32 millisec)
{
        u32 req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = readl(&iop->inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                msleep(1);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                writel(req, &iop->outbound_queue);
                hptiop_pci_posting_flush(iop);
                return 0;
        }

        return -1;
}

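/*
 * Completion dispatch: tags with IOPMU_QUEUE_ADDR_HOST_BIT set refer
 * to host-allocated SCSI requests (the remaining bits are the request
 * index); anything else is an offset into IOP memory, i.e. an ioctl
 * pass-through request.
 */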
static void hptiop_request_callback(struct hptiop_hba *hba, u32 tag)
{
        if ((tag & IOPMU_QUEUE_MASK_HOST_BITS) == IOPMU_QUEUE_ADDR_HOST_BIT)
                return hptiop_host_request_callback(hba,
                                tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
        else
                return hptiop_iop_request_callback(hba, tag);
}

static inline void hptiop_drain_outbound_queue(struct hptiop_hba *hba)
{
        u32 req;

        while ((req = readl(&hba->iop->outbound_queue)) !=
                        IOPMU_QUEUE_EMPTY) {

                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback(hba, req);
                else {
                        struct hpt_iop_request_header __iomem *p;

                        p = (struct hpt_iop_request_header __iomem *)
                                ((char __iomem *)hba->iop + req);

                        if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                if (readl(&p->context))
                                        hptiop_request_callback(hba, req);
                                else
                                        writel(1, &p->context);
                        }
                        else
                                hptiop_request_callback(hba, req);
                }
        }
}

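/*
 * Core interrupt service routine: handles an outbound message from the
 * IOP firmware and/or completions on the outbound post queue. Returns
 * nonzero if either source was serviced. Callers must hold host_lock.
 */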
static int __iop_intr(struct hptiop_hba *hba)
{
        struct hpt_iopmu __iomem *iop = hba->iop;
        u32 status;
        int ret = 0;

        status = readl(&iop->outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u32 msg = readl(&iop->outbound_msgaddr0);
                dprintk("received outbound msg %x\n", msg);
                writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
                hptiop_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue(hba);
                ret = 1;
        }

        return ret;
}

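/*
 * Post a request flagged IOP_REQUEST_FLAG_SYNC_REQUEST and busy-poll
 * its context word, which hptiop_drain_outbound_queue() sets to 1 when
 * the IOP hands the request back. Only used before the SCSI host is
 * up (get/set config), so polling is acceptable.
 */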
static int iop_send_sync_request(struct hptiop_hba *hba,
                                        void __iomem *_req, u32 millisec)
{
        struct hpt_iop_request_header __iomem *req = _req;
        u32 i;

        writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST,
                        &req->flags);

        writel(0, &req->context);

        writel((unsigned long)req - (unsigned long)hba->iop,
                        &hba->iop->inbound_queue);

        hptiop_pci_posting_flush(hba->iop);

        for (i = 0; i < millisec; i++) {
                __iop_intr(hba);
                if (readl(&req->context))
                        return 0;
                msleep(1);
        }

        return -1;
}

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
        u32 i;

        hba->msg_done = 0;

        writel(msg, &hba->iop->inbound_msgaddr0);

        hptiop_pci_posting_flush(hba->iop);

        for (i = 0; i < millisec; i++) {
                spin_lock_irq(hba->host->host_lock);
                __iop_intr(hba);
                spin_unlock_irq(hba->host->host_lock);
                if (hba->msg_done)
                        break;
                msleep(1);
        }

        return hba->msg_done ? 0 : -1;
}

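/*
 * get/set config both borrow a free request slot from the IOP's
 * inbound queue, build the request in IOP memory with writel(), and
 * complete it synchronously before the SCSI host is registered.
 */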
static int iop_get_config(struct hptiop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u32 req32;
        struct hpt_iop_request_get_config __iomem *req;

        req32 = readl(&hba->iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_get_config __iomem *)
                        ((unsigned long)hba->iop + req32);

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request(hba, req, 20000)) {
                dprintk("Get config send cmd failed\n");
                return -1;
        }

        memcpy_fromio(config, req, sizeof(*config));
        writel(req32, &hba->iop->outbound_queue);
        return 0;
}

static int iop_set_config(struct hptiop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u32 req32;
        struct hpt_iop_request_set_config __iomem *req;

        req32 = readl(&hba->iop->inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        req = (struct hpt_iop_request_set_config __iomem *)
                        ((unsigned long)hba->iop + req32);

        memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
                (u8 *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        writel(0, &req->header.flags);
        writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
        writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
        writel(IOP_RESULT_PENDING, &req->header.result);

        if (iop_send_sync_request(hba, req, 20000)) {
                dprintk("Set config send cmd failed\n");
                return -1;
        }

        writel(req32, &hba->iop->outbound_queue);
        return 0;
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
        struct hpt_iopmu __iomem *iop = hba->iop;

        /* enable interrupts */
        writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
                        &iop->outbound_intmask);

        hba->initialized = 1;

        /* start background tasks */
        if (iop_send_sync_msg(hba,
                        IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                printk(KERN_ERR "scsi%d: fail to start background task\n",
                                hba->host->host_no);
                return -1;
        }
        return 0;
}

static int hptiop_map_pci_bar(struct hptiop_hba *hba)
{
        u32 mem_base_phy, length;
        void __iomem *mem_base_virt;
        struct pci_dev *pcidev = hba->pcidev;

        if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM)) {
                printk(KERN_ERR "scsi%d: pci resource invalid\n",
                                hba->host->host_no);
                return -1;
        }

        mem_base_phy = pci_resource_start(pcidev, 0);
        length = pci_resource_len(pcidev, 0);
        mem_base_virt = ioremap(mem_base_phy, length);

        if (!mem_base_virt) {
                printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
                                hba->host->host_no);
                return -1;
        }

        hba->iop = mem_base_virt;
        dprintk("hptiop_map_pci_bar: iop=%p\n", hba->iop);
        return 0;
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
        dprintk("iop message 0x%x\n", msg);

        if (!hba->initialized)
                return;

        if (msg == IOPMU_INBOUND_MSG0_RESET) {
                atomic_set(&hba->resetting, 0);
                wake_up(&hba->reset_wq);
        }
        else if (msg <= IOPMU_INBOUND_MSG0_MAX)
                hba->msg_done = 1;
}

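/* Free requests are kept on a simple singly linked list. */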
static inline struct hptiop_request *get_req(struct hptiop_hba *hba)
{
        struct hptiop_request *ret;

        dprintk("get_req : req=%p\n", hba->req_list);

        ret = hba->req_list;
        if (ret)
                hba->req_list = ret->next;

        return ret;
}

static inline void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
        dprintk("free_req(%d, %p)\n", req->index, req);
        req->next = hba->req_list;
        hba->req_list = req;
}

static void hptiop_host_request_callback(struct hptiop_hba *hba, u32 tag)
{
        struct hpt_iop_request_scsi_command *req;
        struct scsi_cmnd *scp;

        req = (struct hpt_iop_request_scsi_command *)hba->reqs[tag].req_virt;
        dprintk("hptiop_host_request_callback: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, req->header.type, req->header.result,
                        req->header.context, tag);

        BUG_ON(!req->header.result);
        BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

        scp = hba->reqs[tag].scp;

        if (HPT_SCP(scp)->mapped) {
                if (scp->use_sg)
                        pci_unmap_sg(hba->pcidev,
                                (struct scatterlist *)scp->request_buffer,
                                scp->use_sg,
                                scp->sc_data_direction
                        );
                else
                        pci_unmap_single(hba->pcidev,
                                HPT_SCP(scp)->dma_handle,
                                scp->request_bufflen,
                                scp->sc_data_direction
                        );
        }

        switch (le32_to_cpu(req->header.result)) {
        case IOP_RESULT_SUCCESS:
                scp->result = (DID_OK<<16);
                break;
        case IOP_RESULT_BAD_TARGET:
                scp->result = (DID_BAD_TARGET<<16);
                break;
        case IOP_RESULT_BUSY:
                scp->result = (DID_BUS_BUSY<<16);
                break;
        case IOP_RESULT_RESET:
                scp->result = (DID_RESET<<16);
                break;
        case IOP_RESULT_FAIL:
                scp->result = (DID_ERROR<<16);
                break;
        case IOP_RESULT_INVALID_REQUEST:
                scp->result = (DID_ABORT<<16);
                break;
        case IOP_RESULT_MODE_SENSE_CHECK_CONDITION:
                scp->result = SAM_STAT_CHECK_CONDITION;
                memset(&scp->sense_buffer,
                                0, sizeof(scp->sense_buffer));
                memcpy(&scp->sense_buffer,
                        &req->sg_list, le32_to_cpu(req->dataxfer_length));
                break;

        default:
                scp->result = ((DRIVER_INVALID|SUGGEST_ABORT)<<24) |
                                        (DID_ABORT<<16);
                break;
        }

        dprintk("scsi_done(%p)\n", scp);
        scp->scsi_done(scp);
        free_req(hba, &hba->reqs[tag]);
}

static void hptiop_iop_request_callback(struct hptiop_hba *hba, u32 tag)
{
        struct hpt_iop_request_header __iomem *req;
        struct hpt_iop_request_ioctl_command __iomem *p;
        struct hpt_ioctl_k *arg;

        req = (struct hpt_iop_request_header __iomem *)
                        ((unsigned long)hba->iop + tag);
        dprintk("hptiop_iop_request_callback: req=%p, type=%d, "
                        "result=%d, context=0x%x tag=%d\n",
                        req, readl(&req->type), readl(&req->result),
                        readl(&req->context), tag);

        BUG_ON(!readl(&req->result));
        BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

        p = (struct hpt_iop_request_ioctl_command __iomem *)req;
        arg = (struct hpt_ioctl_k *)(unsigned long)
                (readl(&req->context) |
                        ((u64)readl(&req->context_hi32)<<32));

        if (readl(&req->result) == IOP_RESULT_SUCCESS) {
                arg->result = HPT_IOCTL_RESULT_OK;

                if (arg->outbuf_size)
                        memcpy_fromio(arg->outbuf,
                                &p->buf[(readl(&p->inbuf_size) + 3) & ~3],
                                arg->outbuf_size);

                if (arg->bytes_returned)
                        *arg->bytes_returned = arg->outbuf_size;
        }
        else
                arg->result = HPT_IOCTL_RESULT_FAILED;

        arg->done(arg);
        writel(tag, &hba->iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct hptiop_hba *hba = dev_id;
        int handled;
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        handled = __iop_intr(hba);
        spin_unlock_irqrestore(hba->host->host_lock, flags);

        return handled;
}

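/*
 * Fill the hardware S/G descriptors for @scp. Returns the number of
 * descriptors used; the last one has its eot (end-of-table) flag set.
 */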
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct scatterlist *sglist = (struct scatterlist *)scp->request_buffer;

        /*
         * though we'll not get non-use_sg fields anymore,
         * keep use_sg checking anyway
         */
        if (scp->use_sg) {
                int idx;

                HPT_SCP(scp)->sgcnt = pci_map_sg(hba->pcidev,
                                sglist, scp->use_sg,
                                scp->sc_data_direction);
                HPT_SCP(scp)->mapped = 1;
                BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

                for (idx = 0; idx < HPT_SCP(scp)->sgcnt; idx++) {
                        psg[idx].pci_address =
                                cpu_to_le64(sg_dma_address(&sglist[idx]));
                        psg[idx].size = cpu_to_le32(sg_dma_len(&sglist[idx]));
                        psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
                                cpu_to_le32(1) : 0;
                }

                return HPT_SCP(scp)->sgcnt;
        } else {
                HPT_SCP(scp)->dma_handle = pci_map_single(
                                hba->pcidev,
                                scp->request_buffer,
                                scp->request_bufflen,
                                scp->sc_data_direction
                        );
                HPT_SCP(scp)->mapped = 1;
                psg->pci_address = cpu_to_le64(HPT_SCP(scp)->dma_handle);
                psg->size = cpu_to_le32(scp->request_bufflen);
                psg->eot = cpu_to_le32(1);
                return 1;
        }
}

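/*
 * queuecommand is called with host_lock held. The request frame lives
 * in host memory; only its 32-byte-aligned physical address (shifted
 * right by 5 bits, with IOPMU_QUEUE_ADDR_HOST_BIT set) is posted to
 * the inbound queue.
 */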
static int hptiop_queuecommand(struct scsi_cmnd *scp,
                                void (*done)(struct scsi_cmnd *))
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iop_request_scsi_command *req;
        int sg_count = 0;
        struct hptiop_request *_req;

        BUG_ON(!done);
        scp->scsi_done = done;

        _req = get_req(hba);
        if (_req == NULL) {
                dprintk("hptiop_queuecmd : no free req\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        _req->scp = scp;

        dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
                        "req_index=%d, req=%p\n",
                        scp,
                        host->host_no, scp->device->channel,
                        scp->device->id, scp->device->lun,
                        *((u32 *)&scp->cmnd),
                        *((u32 *)&scp->cmnd + 1),
                        *((u32 *)&scp->cmnd + 2),
                        _req->index, _req->req_virt);

        scp->result = 0;

        if (scp->device->channel || scp->device->lun ||
                        scp->device->id > hba->max_devices) {
                scp->result = DID_BAD_TARGET << 16;
                goto cmd_done;
        }

        req = (struct hpt_iop_request_scsi_command *)_req->req_virt;

        /* build S/G table */
        if (scp->request_bufflen)
                sg_count = hptiop_buildsgl(scp, req->sg_list);
        else
                HPT_SCP(scp)->mapped = 0;

        req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
        req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
        req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
        req->header.context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
                                                        (u32)_req->index);
        req->header.context_hi32 = 0;
        req->dataxfer_length = cpu_to_le32(scp->request_bufflen);
        req->channel = scp->device->channel;
        req->target = scp->device->id;
        req->lun = scp->device->lun;
        req->header.size = cpu_to_le32(
                                sizeof(struct hpt_iop_request_scsi_command)
                                 - sizeof(struct hpt_iopsg)
                                 + sg_count * sizeof(struct hpt_iopsg));

        memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));

        writel(IOPMU_QUEUE_ADDR_HOST_BIT | _req->req_shifted_phy,
                        &hba->iop->inbound_queue);

        return 0;

cmd_done:
        dprintk("scsi_done(scp=%p)\n", scp);
        scp->scsi_done(scp);
        free_req(hba, _req);
        return 0;
}

static const char *hptiop_info(struct Scsi_Host *host)
{
        return driver_name_long;
}

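/*
 * Ask the IOP to reset itself, then wait for hptiop_message_callback()
 * to clear hba->resetting when the IOPMU_INBOUND_MSG0_RESET message is
 * echoed back. atomic_xchg() keeps concurrent callers from issuing a
 * second reset while one is in flight.
 */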
static int hptiop_reset_hba(struct hptiop_hba *hba)
{
        if (atomic_xchg(&hba->resetting, 1) == 0) {
                atomic_inc(&hba->reset_count);
                writel(IOPMU_INBOUND_MSG0_RESET,
                                &hba->iop->inbound_msgaddr0);
                hptiop_pci_posting_flush(hba->iop);
        }

        wait_event_timeout(hba->reset_wq,
                        atomic_read(&hba->resetting) == 0, 60 * HZ);

        if (atomic_read(&hba->resetting)) {
                /* IOP is in unknown state, abort reset */
                printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
                return -1;
        }

        if (iop_send_sync_msg(hba,
                IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                dprintk("scsi%d: fail to start background task\n",
                                hba->host->host_no);
        }

        return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
        struct Scsi_Host *host = scp->device->host;
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
                        scp->device->host->host_no, scp->device->channel,
                        scp->device->id, scp);

        return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
                                                int queue_depth)
{
        if (queue_depth > 256)
                queue_depth = 256;
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
        return queue_depth;
}

struct hptiop_getinfo {
        char __user *buffer;
        loff_t buflength;
        loff_t bufoffset;
        loff_t buffillen;
        loff_t filpos;
};

static void hptiop_copy_mem_info(struct hptiop_getinfo *pinfo,
                                        char *data, int datalen)
{
        if (pinfo->filpos < pinfo->bufoffset) {
                if (pinfo->filpos + datalen <= pinfo->bufoffset) {
                        pinfo->filpos += datalen;
                        return;
                }

                data += (pinfo->bufoffset - pinfo->filpos);
                datalen -= (pinfo->bufoffset - pinfo->filpos);
                pinfo->filpos = pinfo->bufoffset;
        }

        pinfo->filpos += datalen;
        if (pinfo->buffillen == pinfo->buflength)
                return;

        if (pinfo->buflength - pinfo->buffillen < datalen)
                datalen = pinfo->buflength - pinfo->buffillen;

        if (copy_to_user(pinfo->buffer + pinfo->buffillen, data, datalen))
                return;

        pinfo->buffillen += datalen;
}

static int hptiop_copy_info(struct hptiop_getinfo *pinfo, char *fmt, ...)
{
        va_list args;
        char buf[128];
        int len;

        va_start(args, fmt);
        len = vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);

        hptiop_copy_mem_info(pinfo, buf, len);
        return len;
}

static void hptiop_ioctl_done(struct hpt_ioctl_k *arg)
{
        arg->done = NULL;
        wake_up(&arg->hba->ioctl_wq);
}

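/*
 * Pass-through ioctl: the request, including the caller's input
 * buffer, is built directly in IOP memory; the IOP writes any output
 * right behind the (dword-aligned) input area, and the completion
 * path copies it back in hptiop_iop_request_callback().
 */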
static void hptiop_do_ioctl(struct hpt_ioctl_k *arg)
{
        struct hptiop_hba *hba = arg->hba;
        u32 val;
        struct hpt_iop_request_ioctl_command __iomem *req;
        int ioctl_retry = 0;

        dprintk("scsi%d: hptiop_do_ioctl\n", hba->host->host_no);

        /*
         * check (in + out) buffer size from application.
         * outbuf must be dword aligned.
         */
        if (((arg->inbuf_size + 3) & ~3) + arg->outbuf_size >
                        hba->max_request_size
                                - sizeof(struct hpt_iop_request_header)
                                - 4 * sizeof(u32)) {
                dprintk("scsi%d: ioctl buf size (%d/%d) is too large\n",
                                hba->host->host_no,
                                arg->inbuf_size, arg->outbuf_size);
                arg->result = HPT_IOCTL_RESULT_FAILED;
                return;
        }

retry:
        spin_lock_irq(hba->host->host_lock);

        val = readl(&hba->iop->inbound_queue);
        if (val == IOPMU_QUEUE_EMPTY) {
                spin_unlock_irq(hba->host->host_lock);
                dprintk("scsi%d: no free req for ioctl\n", hba->host->host_no);
                arg->result = HPT_IOCTL_RESULT_FAILED;
                return;
        }

        req = (struct hpt_iop_request_ioctl_command __iomem *)
                        ((unsigned long)hba->iop + val);

        writel(HPT_CTL_CODE_LINUX_TO_IOP(arg->ioctl_code),
                        &req->ioctl_code);
        writel(arg->inbuf_size, &req->inbuf_size);
        writel(arg->outbuf_size, &req->outbuf_size);

        /*
         * use the buffer on the IOP local memory first, then copy it
         * back to the host buffer on completion.
         * the caller's request buffer should be little-endian.
         */
        if (arg->inbuf_size)
                memcpy_toio(req->buf, arg->inbuf, arg->inbuf_size);

        /* correct the controller ID for IOP */
        if ((arg->ioctl_code == HPT_IOCTL_GET_CHANNEL_INFO ||
                arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO_V2 ||
                arg->ioctl_code == HPT_IOCTL_GET_CONTROLLER_INFO)
                && arg->inbuf_size >= sizeof(u32))
                writel(0, req->buf);

        writel(IOP_REQUEST_TYPE_IOCTL_COMMAND, &req->header.type);
        writel(0, &req->header.flags);
        writel(offsetof(struct hpt_iop_request_ioctl_command, buf)
                        + arg->inbuf_size, &req->header.size);
        writel((u32)(unsigned long)arg, &req->header.context);
        writel(BITS_PER_LONG > 32 ? (u32)((unsigned long)arg>>32) : 0,
                        &req->header.context_hi32);
        writel(IOP_RESULT_PENDING, &req->header.result);

        arg->result = HPT_IOCTL_RESULT_FAILED;
        arg->done = hptiop_ioctl_done;

        writel(val, &hba->iop->inbound_queue);
        hptiop_pci_posting_flush(hba->iop);

        spin_unlock_irq(hba->host->host_lock);

        wait_event_timeout(hba->ioctl_wq, arg->done == NULL, 60 * HZ);

        if (arg->done != NULL) {
                hptiop_reset_hba(hba);
                if (ioctl_retry++ < 3)
                        goto retry;
        }

        dprintk("hpt_iop_ioctl %x result %d\n",
                        arg->ioctl_code, arg->result);
}

static int __hpt_do_ioctl(struct hptiop_hba *hba, u32 code, void *inbuf,
                        u32 insize, void *outbuf, u32 outsize)
{
        struct hpt_ioctl_k arg;
        arg.hba = hba;
        arg.ioctl_code = code;
        arg.inbuf = inbuf;
        arg.outbuf = outbuf;
        arg.inbuf_size = insize;
        arg.outbuf_size = outsize;
        arg.bytes_returned = NULL;
        hptiop_do_ioctl(&arg);
        return arg.result;
}

static inline int hpt_id_valid(__le32 id)
{
        return id != 0 && id != cpu_to_le32(0xffffffff);
}

static int hptiop_get_controller_info(struct hptiop_hba *hba,
                                        struct hpt_controller_info *pinfo)
{
        int id = 0;

        return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CONTROLLER_INFO,
                &id, sizeof(int), pinfo, sizeof(*pinfo));
}

static int hptiop_get_channel_info(struct hptiop_hba *hba, int bus,
                                        struct hpt_channel_info *pinfo)
{
        u32 ids[2];

        ids[0] = 0;
        ids[1] = bus;
        return __hpt_do_ioctl(hba, HPT_IOCTL_GET_CHANNEL_INFO,
                                ids, sizeof(ids), pinfo, sizeof(*pinfo));
}

static int hptiop_get_logical_devices(struct hptiop_hba *hba,
                                        __le32 *pids, int maxcount)
{
        int i;
        u32 count = maxcount - 1;

        if (__hpt_do_ioctl(hba, HPT_IOCTL_GET_LOGICAL_DEVICES,
                        &count, sizeof(u32),
                        pids, sizeof(u32) * maxcount))
                return -1;

        maxcount = le32_to_cpu(pids[0]);
        for (i = 0; i < maxcount; i++)
                pids[i] = pids[i+1];

        return maxcount;
}

static int hptiop_get_device_info_v3(struct hptiop_hba *hba, __le32 id,
                                struct hpt_logical_device_info_v3 *pinfo)
{
        return __hpt_do_ioctl(hba, HPT_IOCTL_GET_DEVICE_INFO_V3,
                                &id, sizeof(u32),
                                pinfo, sizeof(*pinfo));
}

static const char *get_array_status(struct hpt_logical_device_info_v3 *devinfo)
{
        static char s[64];
        u32 flags = le32_to_cpu(devinfo->u.array.flags);
        u32 trans_prog = le32_to_cpu(devinfo->u.array.transforming_progress);
        u32 reb_prog = le32_to_cpu(devinfo->u.array.rebuilding_progress);

        if (flags & ARRAY_FLAG_DISABLED)
                return "Disabled";
        else if (flags & ARRAY_FLAG_TRANSFORMING)
                sprintf(s, "Expanding/Migrating %d.%d%%%s%s",
                        trans_prog / 100,
                        trans_prog % 100,
                        (flags & (ARRAY_FLAG_NEEDBUILDING|ARRAY_FLAG_BROKEN))?
                                        ", Critical" : "",
                        ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
                         !(flags & ARRAY_FLAG_REBUILDING) &&
                         !(flags & ARRAY_FLAG_INITIALIZING))?
                                        ", Uninitialized" : "");
        else if ((flags & ARRAY_FLAG_BROKEN) &&
                                devinfo->u.array.array_type != AT_RAID6)
                return "Critical";
        else if (flags & ARRAY_FLAG_REBUILDING)
                sprintf(s,
                        (flags & ARRAY_FLAG_NEEDINITIALIZING)?
                                "%sBackground initializing %d.%d%%" :
                                        "%sRebuilding %d.%d%%",
                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
                        reb_prog / 100,
                        reb_prog % 100);
        else if (flags & ARRAY_FLAG_VERIFYING)
                sprintf(s, "%sVerifying %d.%d%%",
                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
                        reb_prog / 100,
                        reb_prog % 100);
        else if (flags & ARRAY_FLAG_INITIALIZING)
                sprintf(s, "%sForeground initializing %d.%d%%",
                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
                        reb_prog / 100,
                        reb_prog % 100);
        else if (flags & ARRAY_FLAG_NEEDTRANSFORM)
                sprintf(s, "%s%s%s", "Need Expanding/Migrating",
                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "",
                        ((flags & ARRAY_FLAG_NEEDINITIALIZING) &&
                         !(flags & ARRAY_FLAG_REBUILDING) &&
                         !(flags & ARRAY_FLAG_INITIALIZING))?
                                ", Uninitialized" : "");
        else if (flags & ARRAY_FLAG_NEEDINITIALIZING &&
                !(flags & ARRAY_FLAG_REBUILDING) &&
                !(flags & ARRAY_FLAG_INITIALIZING))
                sprintf(s, "%sUninitialized",
                        (flags & ARRAY_FLAG_BROKEN)? "Critical, " : "");
        else if ((flags & ARRAY_FLAG_NEEDBUILDING) ||
                        (flags & ARRAY_FLAG_BROKEN))
                return "Critical";
        else
                return "Normal";

        return s;
}

static void hptiop_dump_devinfo(struct hptiop_hba *hba,
                        struct hptiop_getinfo *pinfo, __le32 id, int indent)
{
        struct hpt_logical_device_info_v3 devinfo;
        int i;
        u64 capacity;

        for (i = 0; i < indent; i++)
                hptiop_copy_info(pinfo, "\t");

        if (hptiop_get_device_info_v3(hba, id, &devinfo)) {
                hptiop_copy_info(pinfo, "unknown\n");
                return;
        }

        switch (devinfo.type) {

        case LDT_DEVICE: {
                struct hd_driveid *driveid;
                u32 flags = le32_to_cpu(devinfo.u.device.flags);

                driveid = (struct hd_driveid *)devinfo.u.device.ident;
                /* model[] is 40 chars long, but we just want 20 chars here */
                driveid->model[20] = 0;

                if (indent) {
                        if (flags & DEVICE_FLAG_DISABLED)
                                hptiop_copy_info(pinfo, "Missing\n");
                        else
                                hptiop_copy_info(pinfo, "CH%d %s\n",
                                        devinfo.u.device.path_id + 1,
                                        driveid->model);
                } else {
                        capacity = le64_to_cpu(devinfo.capacity) * 512;
                        do_div(capacity, 1000000);
                        hptiop_copy_info(pinfo,
                                "CH%d %s, %lluMB, %s %s%s%s%s\n",
                                devinfo.u.device.path_id + 1,
                                driveid->model,
                                capacity,
                                (flags & DEVICE_FLAG_DISABLED)?
                                        "Disabled" : "Normal",
                                devinfo.u.device.read_ahead_enabled?
                                                "[RA]" : "",
                                devinfo.u.device.write_cache_enabled?
                                                "[WC]" : "",
                                devinfo.u.device.TCQ_enabled?
                                                "[TCQ]" : "",
                                devinfo.u.device.NCQ_enabled?
                                                "[NCQ]" : "");
                }
                break;
        }

        case LDT_ARRAY:
                if (devinfo.target_id != INVALID_TARGET_ID)
                        hptiop_copy_info(pinfo, "[DISK %d_%d] ",
                                        devinfo.vbus_id, devinfo.target_id);

                capacity = le64_to_cpu(devinfo.capacity) * 512;
                do_div(capacity, 1000000);
                hptiop_copy_info(pinfo, "%s (%s), %lluMB, %s\n",
                        devinfo.u.array.name,
                        devinfo.u.array.array_type==AT_RAID0? "RAID0" :
                                devinfo.u.array.array_type==AT_RAID1? "RAID1" :
                                devinfo.u.array.array_type==AT_RAID5? "RAID5" :
                                devinfo.u.array.array_type==AT_RAID6? "RAID6" :
                                devinfo.u.array.array_type==AT_JBOD? "JBOD" :
                                        "unknown",
                        capacity,
                        get_array_status(&devinfo));
                for (i = 0; i < devinfo.u.array.ndisk; i++) {
                        if (hpt_id_valid(devinfo.u.array.members[i])) {
                                if (cpu_to_le16(1<<i) &
                                        devinfo.u.array.critical_members)
                                        hptiop_copy_info(pinfo, "\t*");
                                hptiop_dump_devinfo(hba, pinfo,
                                        devinfo.u.array.members[i], indent+1);
                        }
                        else
                                hptiop_copy_info(pinfo, "\tMissing\n");
                }

                if (id == devinfo.u.array.transform_source) {
                        hptiop_copy_info(pinfo, "\tExpanding/Migrating to:\n");
                        hptiop_dump_devinfo(hba, pinfo,
                                devinfo.u.array.transform_target, indent+1);
                }
                break;
        }
}

static ssize_t hptiop_show_version(struct class_device *class_dev, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

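/*
 * The hptiop char device: read() dumps a human-readable device tree,
 * ioctl() passes HPT_IOCTL_MAGIC-tagged requests through to the IOP,
 * and the minor number selects the adapter by its position in
 * hptiop_hba_list.
 */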
static ssize_t hptiop_cdev_read(struct file *filp, char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct hptiop_hba *hba = filp->private_data;
        struct hptiop_getinfo info;
        int i, j, ndev;
        struct hpt_controller_info con_info;
        struct hpt_channel_info chan_info;
        __le32 ids[32];

        info.buffer = buf;
        info.buflength = count;
        info.bufoffset = ppos ? *ppos : 0;
        info.filpos = 0;
        info.buffillen = 0;

        if (hptiop_get_controller_info(hba, &con_info))
                return -EIO;

        for (i = 0; i < con_info.num_buses; i++) {
                if (hptiop_get_channel_info(hba, i, &chan_info) == 0) {
                        if (hpt_id_valid(chan_info.devices[0]))
                                hptiop_dump_devinfo(hba, &info,
                                                chan_info.devices[0], 0);
                        if (hpt_id_valid(chan_info.devices[1]))
                                hptiop_dump_devinfo(hba, &info,
                                                chan_info.devices[1], 0);
                }
        }

        ndev = hptiop_get_logical_devices(hba, ids,
                                sizeof(ids) / sizeof(ids[0]));

        /*
         * if hptiop_get_logical_devices fails, ndev==-1 and it just
         * outputs nothing here
         */
        for (j = 0; j < ndev; j++)
                hptiop_dump_devinfo(hba, &info, ids[j], 0);

        if (ppos)
                *ppos += info.buffillen;

        return info.buffillen;
}

static int hptiop_cdev_ioctl(struct inode *inode, struct file *file,
                                unsigned int cmd, unsigned long arg)
{
        struct hptiop_hba *hba = file->private_data;
        struct hpt_ioctl_u ioctl_u;
        struct hpt_ioctl_k ioctl_k;
        u32 bytes_returned;
        int err = -EINVAL;

        if (copy_from_user(&ioctl_u,
                (void __user *)arg, sizeof(struct hpt_ioctl_u)))
                return -EINVAL;

        if (ioctl_u.magic != HPT_IOCTL_MAGIC)
                return -EINVAL;

        ioctl_k.ioctl_code = ioctl_u.ioctl_code;
        ioctl_k.inbuf = NULL;
        ioctl_k.inbuf_size = ioctl_u.inbuf_size;
        ioctl_k.outbuf = NULL;
        ioctl_k.outbuf_size = ioctl_u.outbuf_size;
        ioctl_k.hba = hba;
        ioctl_k.bytes_returned = &bytes_returned;

        /* verify user buffer */
        if ((ioctl_k.inbuf_size && !access_ok(VERIFY_READ,
                        ioctl_u.inbuf, ioctl_k.inbuf_size)) ||
                (ioctl_k.outbuf_size && !access_ok(VERIFY_WRITE,
                        ioctl_u.outbuf, ioctl_k.outbuf_size)) ||
                (ioctl_u.bytes_returned && !access_ok(VERIFY_WRITE,
                        ioctl_u.bytes_returned, sizeof(u32))) ||
                ioctl_k.inbuf_size + ioctl_k.outbuf_size > 0x10000) {

                dprintk("scsi%d: got bad user address\n", hba->host->host_no);
                return -EINVAL;
        }

        /* map buffer to kernel. */
        if (ioctl_k.inbuf_size) {
                ioctl_k.inbuf = kmalloc(ioctl_k.inbuf_size, GFP_KERNEL);
                if (!ioctl_k.inbuf) {
                        dprintk("scsi%d: fail to alloc inbuf\n",
                                        hba->host->host_no);
                        err = -ENOMEM;
                        goto err_exit;
                }

                if (copy_from_user(ioctl_k.inbuf,
                                ioctl_u.inbuf, ioctl_k.inbuf_size))
                        goto err_exit;
        }

        if (ioctl_k.outbuf_size) {
                ioctl_k.outbuf = kmalloc(ioctl_k.outbuf_size, GFP_KERNEL);
                if (!ioctl_k.outbuf) {
                        dprintk("scsi%d: fail to alloc outbuf\n",
                                        hba->host->host_no);
                        err = -ENOMEM;
                        goto err_exit;
                }
        }

        hptiop_do_ioctl(&ioctl_k);

        if (ioctl_k.result == HPT_IOCTL_RESULT_OK) {
                if (ioctl_k.outbuf_size &&
                        copy_to_user(ioctl_u.outbuf,
                                ioctl_k.outbuf, ioctl_k.outbuf_size))
                        goto err_exit;

                if (ioctl_u.bytes_returned &&
                        copy_to_user(ioctl_u.bytes_returned,
                                &bytes_returned, sizeof(u32)))
                        goto err_exit;

                err = 0;
        }

err_exit:
        kfree(ioctl_k.inbuf);
        kfree(ioctl_k.outbuf);

        return err;
}

static int hptiop_cdev_open(struct inode *inode, struct file *file)
{
        struct hptiop_hba *hba;
        unsigned i = 0, minor = iminor(inode);

        spin_lock(&hptiop_hba_list_lock);
        list_for_each_entry(hba, &hptiop_hba_list, link) {
                if (i == minor) {
                        file->private_data = hba;
                        spin_unlock(&hptiop_hba_list_lock);
                        return 0;
                }
                i++;
        }

        spin_unlock(&hptiop_hba_list_lock);
        return -ENODEV;
}

static struct file_operations hptiop_cdev_fops = {
        .owner = THIS_MODULE,
        .read = hptiop_cdev_read,
        .ioctl = hptiop_cdev_ioctl,
        .open = hptiop_cdev_open,
};

static ssize_t hptiop_show_fw_version(struct class_device *class_dev, char *buf)
{
        struct Scsi_Host *host = class_to_shost(class_dev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
                        hba->firmware_version >> 24,
                        (hba->firmware_version >> 16) & 0xff,
                        (hba->firmware_version >> 8) & 0xff,
                        hba->firmware_version & 0xff);
}

static struct class_device_attribute hptiop_attr_version = {
        .attr = {
                .name = "driver-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_version,
};

static struct class_device_attribute hptiop_attr_fw_version = {
        .attr = {
                .name = "firmware-version",
                .mode = S_IRUGO,
        },
        .show = hptiop_show_fw_version,
};

static struct class_device_attribute *hptiop_attrs[] = {
        &hptiop_attr_version,
        &hptiop_attr_fw_version,
        NULL
};

static struct scsi_host_template driver_template = {
        .module                     = THIS_MODULE,
        .name                       = driver_name,
        .queuecommand               = hptiop_queuecommand,
        .eh_device_reset_handler    = hptiop_reset,
        .eh_bus_reset_handler       = hptiop_reset,
        .info                       = hptiop_info,
        .unchecked_isa_dma          = 0,
        .emulated                   = 0,
        .use_clustering             = ENABLE_CLUSTERING,
        .proc_name                  = driver_name,
        .shost_attrs                = hptiop_attrs,
        .this_id                    = -1,
        .change_queue_depth         = hptiop_adjust_disk_queue_depth,
};

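/*
 * Probe sequence: map BAR 0, wait for firmware, read the IOP's limits
 * via get config, size the SCSI host accordingly, push set config,
 * register the host, then carve the DMA-coherent region into 32-byte
 * aligned request frames before enabling interrupts.
 */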
static int __devinit hptiop_probe(struct pci_dev *pcidev,
                                        const struct pci_device_id *id)
{
        struct Scsi_Host *host = NULL;
        struct hptiop_hba *hba;
        struct hpt_iop_request_get_config iop_config;
        struct hpt_iop_request_set_config set_config;
        dma_addr_t start_phy;
        void *start_virt;
        u32 offset, i, req_size;

        dprintk("hptiop_probe(%p)\n", pcidev);

        if (pci_enable_device(pcidev)) {
                printk(KERN_ERR "hptiop: fail to enable pci device\n");
                return -ENODEV;
        }

        printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
                pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
                pcidev->irq);

        pci_set_master(pcidev);

        /* Enable 64bit DMA if possible */
        if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
                if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
                        printk(KERN_ERR "hptiop: fail to set dma_mask\n");
                        goto disable_pci_device;
                }
        }

        if (pci_request_regions(pcidev, driver_name)) {
                printk(KERN_ERR "hptiop: pci_request_regions failed\n");
                goto disable_pci_device;
        }

        host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
        if (!host) {
                printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
                goto free_pci_regions;
        }

        hba = (struct hptiop_hba *)host->hostdata;

        hba->pcidev = pcidev;
        hba->host = host;
        hba->initialized = 0;

        atomic_set(&hba->resetting, 0);
        atomic_set(&hba->reset_count, 0);

        init_waitqueue_head(&hba->reset_wq);
        init_waitqueue_head(&hba->ioctl_wq);

        host->max_lun = 1;
        host->max_channel = 0;
        host->io_port = 0;
        host->n_io_port = 0;
        host->irq = pcidev->irq;

        if (hptiop_map_pci_bar(hba))
                goto free_scsi_host;

        if (iop_wait_ready(hba->iop, 20000)) {
                printk(KERN_ERR "scsi%d: firmware not ready\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        if (iop_get_config(hba, &iop_config)) {
                printk(KERN_ERR "scsi%d: get config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
                                HPTIOP_MAX_REQUESTS);
        hba->max_devices = le32_to_cpu(iop_config.max_devices);
        hba->max_request_size = le32_to_cpu(iop_config.request_size);
        hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
        hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
        hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

        host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
        host->max_id = le32_to_cpu(iop_config.max_devices);
        host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
        host->can_queue = le32_to_cpu(iop_config.max_requests);
        host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
        host->max_cmd_len = 16;

        set_config.vbus_id = cpu_to_le32(host->host_no);
        set_config.iop_id = cpu_to_le32(host->host_no);

        if (iop_set_config(hba, &set_config)) {
                printk(KERN_ERR "scsi%d: set config failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        if (scsi_add_host(host, &pcidev->dev)) {
                printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
                                hba->host->host_no);
                goto unmap_pci_bar;
        }

        pci_set_drvdata(pcidev, host);

        if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
                                        driver_name, hba)) {
                printk(KERN_ERR "scsi%d: request irq %d failed\n",
                                hba->host->host_no, pcidev->irq);
                goto remove_scsi_host;
        }

        /* Allocate request mem */
        req_size = sizeof(struct hpt_iop_request_scsi_command)
                + sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
        if ((req_size & 0x1f) != 0)
                req_size = (req_size + 0x1f) & ~0x1f;

        dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

        hba->req_size = req_size;
        start_virt = dma_alloc_coherent(&pcidev->dev,
                        hba->req_size*hba->max_requests + 0x20,
                        &start_phy, GFP_KERNEL);

        if (!start_virt) {
                printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
                                hba->host->host_no);
                goto free_request_irq;
        }

        hba->dma_coherent = start_virt;
        hba->dma_coherent_handle = start_phy;

        if ((start_phy & 0x1f) != 0) {
                /* keep request frames 32-byte aligned */
                offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
                start_phy += offset;
                start_virt += offset;
        }

        hba->req_list = start_virt;
        for (i = 0; i < hba->max_requests; i++) {
                hba->reqs[i].next = NULL;
                hba->reqs[i].req_virt = start_virt;
                hba->reqs[i].req_shifted_phy = start_phy >> 5;
                hba->reqs[i].index = i;
                free_req(hba, &hba->reqs[i]);
                start_virt = (char *)start_virt + hba->req_size;
                start_phy = start_phy + hba->req_size;
        }

        /* Enable Interrupt and start background task */
        if (hptiop_initialize_iop(hba))
                goto free_request_mem;

        spin_lock(&hptiop_hba_list_lock);
        list_add_tail(&hba->link, &hptiop_hba_list);
        spin_unlock(&hptiop_hba_list_lock);

        scsi_scan_host(host);

        dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
        return 0;

free_request_mem:
        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size*hba->max_requests + 0x20,
                        hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
        free_irq(hba->pcidev->irq, hba);

remove_scsi_host:
        scsi_remove_host(host);

unmap_pci_bar:
        iounmap(hba->iop);

free_pci_regions:
        pci_release_regions(pcidev);

free_scsi_host:
        scsi_host_put(host);

disable_pci_device:
        pci_disable_device(pcidev);

        dprintk("scsi%d: hptiop_probe fail\n", host->host_no);
        return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
        struct hpt_iopmu __iomem *iop = hba->iop;
        u32 int_mask;

        dprintk("hptiop_shutdown(%p)\n", hba);

        /* stop the iop */
        if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
                printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
                                hba->host->host_no);

        /* disable all outbound interrupts */
        int_mask = readl(&iop->outbound_intmask);
        writel(int_mask |
                IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
                &iop->outbound_intmask);
        hptiop_pci_posting_flush(iop);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
        struct Scsi_Host *host = pci_get_drvdata(pcidev);
        struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

        dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

        scsi_remove_host(host);

        spin_lock(&hptiop_hba_list_lock);
        list_del_init(&hba->link);
        spin_unlock(&hptiop_hba_list_lock);

        hptiop_shutdown(pcidev);

        free_irq(hba->pcidev->irq, hba);

        dma_free_coherent(&hba->pcidev->dev,
                        hba->req_size * hba->max_requests + 0x20,
                        hba->dma_coherent,
                        hba->dma_coherent_handle);

        iounmap(hba->iop);

        pci_release_regions(hba->pcidev);
        pci_set_drvdata(hba->pcidev, NULL);
        pci_disable_device(hba->pcidev);

        scsi_host_put(host);
}

static struct pci_device_id hptiop_id_table[] = {
        { PCI_DEVICE(0x1103, 0x3220) },
        { PCI_DEVICE(0x1103, 0x3320) },
        {},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
        .name = driver_name,
        .id_table = hptiop_id_table,
        .probe = hptiop_probe,
        .remove = hptiop_remove,
        .shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
        int error;

        printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);

        error = pci_register_driver(&hptiop_pci_driver);
        if (error < 0)
                return error;

        hptiop_cdev_major = register_chrdev(0, "hptiop", &hptiop_cdev_fops);
        if (hptiop_cdev_major < 0) {
                printk(KERN_WARNING "unable to register hptiop device.\n");
                return hptiop_cdev_major;
        }

        return 0;
}

static void __exit hptiop_module_exit(void)
{
        dprintk("hptiop_module_exit\n");
        unregister_chrdev(hptiop_cdev_major, "hptiop");
        pci_unregister_driver(&hptiop_pci_driver);
}


module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");