/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static int mellanox_workarounds = 1;

module_param(mellanox_workarounds, int, 0444);
MODULE_PARM_DESC(mellanox_workarounds,
		 "Enable workarounds for Mellanox SRP target bugs if != 0");

static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dev->dma_device,
				 iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

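/*
 * An IU (information unit) buffer is DMA-mapped once here and stays
 * mapped for its lifetime; ownership moves between CPU and device
 * with dma_sync_single_for_cpu()/dma_sync_single_for_device() rather
 * than by remapping on every request.
 */
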
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dev->dma_device,
			 iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->cq;
	init_attr->recv_cq             = target->cq;

	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}

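/*
 * Note that a single CQ serves both the send and the receive queue;
 * the two kinds of completion are told apart by the SRP_OP_RECV bit
 * in the work request ID (see srp_completion() below).
 */
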
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID	    |
						   IB_SA_PATH_REC_SGID	    |
						   IB_SA_PATH_REC_NUMB_PATH |
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path 	      = &target->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num 		      = target->qp->qp_num;
	req->param.qp_type 		      = target->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count 		      = 7;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

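/*
 * Sketch of the 16-byte port identifiers built above for the current
 * (rev. 16a) I/O class; the rev. 10 layout swaps the two halves of
 * each identifier:
 *
 *	initiator_port_id[0..7]   initiator extension
 *	initiator_port_id[8..15]  source GID interface ID (port GUID)
 *	target_port_id[0..7]      SRP identifier extension
 *	target_port_id[8..15]     I/O controller GUID
 */
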
static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}

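/*
 * Note that on SRP_PORT_REDIRECT the loop above re-runs the path
 * record query before retrying, while on SRP_DLID_REDIRECT it can
 * simply resend the REQ: srp_cm_rej_handler() has already patched the
 * redirected DLID into target->path.
 */
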
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct scatterlist *scat;
	int nents;

	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
	}

	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
		     scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req);
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req, *tmp;
	struct ib_wc wc;
	int ret;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	spin_lock_irq(target->scsi_host->host_lock);
	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		srp_reset_req(target, req);
	spin_unlock_irq(target->scsi_host->host_lock);

	target->rx_head	= 0;
	target->tx_head	= 0;
	target->tx_tail	= 0;

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;
	struct srp_device *dev = target->srp_host->dev;

	if (!dev->fmr_pool)
		return -ENODEV;

	if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) &&
	    mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3))
		return -EINVAL;

	len = page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i) {
		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += sg_dma_len(&scat[i]);
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i)
		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

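/*
 * Illustration of the alignment rule enforced above: only the first
 * scatterlist entry may start, and only the last may end, off an FMR
 * page boundary.  With 4 KB FMR pages, for example, a three-entry
 * list whose middle entry begins at offset 512 cannot be covered by
 * one FMR mapping; srp_map_data() then falls back to an indirect
 * descriptor.
 */
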
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
			   scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));
	} else if (srp_map_fmr(target, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		for (i = 0; i < count; ++i) {
			buf->desc_list[i].va  =
				cpu_to_be64(sg_dma_address(&scat[i]));
			buf->desc_list[i].key =
				cpu_to_be32(target->srp_host->dev->mr->rkey);
			buf->desc_list[i].len =
				cpu_to_be32(sg_dma_len(&scat[i]));
			datalen += sg_dma_len(&scat[i]);
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

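/*
 * Per the SRP spec, the data-out buffer format occupies the high
 * nibble of buf_fmt and the data-in format the low nibble, which is
 * why the DMA_TO_DEVICE case above shifts fmt left by four bits.
 */
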
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "  [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}

static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}

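/*
 * Work request IDs encode what completed: receive completions carry
 * the SRP_OP_RECV flag plus the rx ring index, while send completions
 * carry only the tx ring index.  That is all srp_completion() needs
 * to dispatch receives and advance tx_tail for sends.
 */
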
static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next 	 = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu 	 = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next     = NULL;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}

static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1))
		++target->zero_req_lim;

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}

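/*
 * The tx ring holds SRP_SQ_SIZE + 1 entries, and SRP_SQ_SIZE is
 * defined (in ib_srp.h) as one less than the power-of-two receive
 * ring size, so "tx_head & SRP_SQ_SIZE" acts as a cheap modulo over
 * the ring.
 */
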
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}

static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				srp_max_iu_len, DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   srp_max_iu_len, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

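/*
 * Flow control: a receive buffer for the expected response is posted
 * before the command itself is sent, and __srp_post_send() debits
 * req_lim by one.  The credit comes back with the target's response,
 * via the req_lim_delta field handled in srp_process_rsp().
 */
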
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING "  REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		if (!target->rx_ring[0]) {
			target->status = srp_alloc_iu_bufs(target);
			if (target->status)
				break;
		}

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			printk(KERN_ERR PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag 	= req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = tsk_mgmt;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}

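/*
 * A task management request reuses the request slot of the command it
 * operates on, with the SRP_TAG_TSK_MGMT bit set in the tag, so that
 * srp_process_rsp() can tell a task management response apart from a
 * normal command response and complete req->done instead.
 */
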
static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}

static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	printk(KERN_ERR "SRP abort called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	printk(KERN_ERR "SRP reset_device called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	return sprintf(buf, "%s\n", target->srp_host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(id_ext,	  S_IRUGO, show_id_ext,		 NULL);
static CLASS_DEVICE_ATTR(ioc_guid,	  S_IRUGO, show_ioc_guid,	 NULL);
static CLASS_DEVICE_ATTR(service_id,	  S_IRUGO, show_service_id,	 NULL);
static CLASS_DEVICE_ATTR(pkey,		  S_IRUGO, show_pkey,		 NULL);
static CLASS_DEVICE_ATTR(dgid,		  S_IRUGO, show_dgid,		 NULL);
static CLASS_DEVICE_ATTR(zero_req_lim,	  S_IRUGO, show_zero_req_lim,	 NULL);
static CLASS_DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,	 NULL);
static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	&class_device_attr_zero_req_lim,
	&class_device_attr_local_ib_port,
	&class_device_attr_local_ib_device,
	NULL
};

static struct scsi_host_template srp_template = {
	.module 			= THIS_MODULE,
	.name 				= "InfiniBand SRP initiator",
	.info 				= srp_target_info,
	.queuecommand 			= srp_queuecommand,
	.eh_abort_handler 		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler 		= srp_reset_host,
	.can_queue 			= SRP_SQ_SIZE,
	.this_id 			= -1,
	.cmd_per_lun 			= SRP_SQ_SIZE,
	.use_clustering 		= ENABLE_CLUSTERING,
	.shost_attrs 			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		 (unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
		return -ENODEV;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};

/*
 * Target ports are added by writing
 *
 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 * pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */

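/*
 * For example (all values below are made-up placeholders):
 *
 *	echo -n id_ext=200100a0b8130603,ioc_guid=0002c90200402bd4,\
 *	dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
 *	service_id=200100a0b8130603 \
 *	    > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */
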
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" 		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" 		},
	{ SRP_OPT_DGID,			"dgid=%s" 		},
	{ SRP_OPT_PKEY,			"pkey=%x" 		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" 		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" 	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_ERR,			NULL 			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class   = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host  = target_host;
	target->srp_host   = host;

	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dev->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->dev->name, port);

	if (class_device_register(&host->class_dev))
		goto free_host;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((unsigned long) srp_dev->fmr_page_size - 1);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

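/*
 * srp_max_iu_len below is sized for the worst-case command IU: the
 * fixed SRP_CMD header, an indirect buffer descriptor, and one
 * 16-byte struct srp_direct_buf per scatter/gather table entry.
 */
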
static int __init srp_init_module(void)
{
	int ret;

	srp_template.sg_tablesize = srp_sg_tablesize;
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);