X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=drivers%2Finfiniband%2Fhw%2Fipath%2Fipath_qp.c;h=83e557be591ed4165432cd6d51d0094d20cc719e;hb=b4b9034132c7e1e4474999e688dd7d03b7d97a99;hp=6058d70d7577bb349ab7e5db759e1e3d37db4fc3;hpb=e28c00ad67164dba688c1d19c208c5fb554465f2;p=linux-2.6-omap-h63xx.git

diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 6058d70d757..83e557be591 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,7 +35,7 @@
 #include <linux/vmalloc.h>
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
 #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
@@ -188,8 +189,8 @@ static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
  * Allocate the next available QPN and put the QP into the hash table.
  * The hash table holds a reference to the QP.
  */
-int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
-		    enum ib_qp_type type)
+static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
+			   enum ib_qp_type type)
 {
 	unsigned long flags;
 	u32 qpn;
@@ -232,7 +233,7 @@ bail:
  * Remove the QP from the table so it can't be found asynchronously by
  * the receive interrupt routine.
  */
-void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
+static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
 {
 	struct ipath_qp *q, **qpp;
 	unsigned long flags;
@@ -332,10 +333,11 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
 	qp->qp_access_flags = 0;
+	clear_bit(IPATH_S_BUSY, &qp->s_flags);
 	qp->s_hdrwords = 0;
 	qp->s_psn = 0;
 	qp->r_psn = 0;
-	atomic_set(&qp->msn, 0);
+	qp->r_msn = 0;
 	if (qp->ibqp.qp_type == IB_QPT_RC) {
 		qp->s_state = IB_OPCODE_RC_SEND_LAST;
 		qp->r_state = IB_OPCODE_RC_SEND_LAST;
@@ -344,7 +346,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
 	}
 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-	qp->s_nak_state = 0;
+	qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+	qp->r_nak_state = 0;
 	qp->s_rnr_timeout = 0;
 	qp->s_head = 0;
 	qp->s_tail = 0;
@@ -357,6 +360,67 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 	qp->r_reuse_sge = 0;
 }
 
+/**
+ * ipath_error_qp - put a QP into an error state
+ * @qp: the QP to put into an error state
+ *
+ * Flushes both send and receive work queues.
+ * QP s_lock should be held and interrupts disabled.
+ */
+
+void ipath_error_qp(struct ipath_qp *qp)
+{
+	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+	struct ib_wc wc;
+
+	_VERBS_INFO("QP%d/%d in error state\n",
+		    qp->ibqp.qp_num, qp->remote_qpn);
+
+	spin_lock(&dev->pending_lock);
+	/* XXX What if its already removed by the timeout code? */
+	if (!list_empty(&qp->timerwait))
+		list_del_init(&qp->timerwait);
+	if (!list_empty(&qp->piowait))
+		list_del_init(&qp->piowait);
+	spin_unlock(&dev->pending_lock);
+
+	wc.status = IB_WC_WR_FLUSH_ERR;
+	wc.vendor_err = 0;
+	wc.byte_len = 0;
+	wc.imm_data = 0;
+	wc.qp_num = qp->ibqp.qp_num;
+	wc.src_qp = 0;
+	wc.wc_flags = 0;
+	wc.pkey_index = 0;
+	wc.slid = 0;
+	wc.sl = 0;
+	wc.dlid_path_bits = 0;
+	wc.port_num = 0;
+
+	while (qp->s_last != qp->s_head) {
+		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+
+		wc.wr_id = wqe->wr.wr_id;
+		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+		if (++qp->s_last >= qp->s_size)
+			qp->s_last = 0;
+		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+	}
+	qp->s_cur = qp->s_tail = qp->s_head;
+	qp->s_hdrwords = 0;
+	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+
+	wc.opcode = IB_WC_RECV;
+	spin_lock(&qp->r_rq.lock);
+	while (qp->r_rq.tail != qp->r_rq.head) {
+		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
+		if (++qp->r_rq.tail >= qp->r_rq.size)
+			qp->r_rq.tail = 0;
+		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+	}
+	spin_unlock(&qp->r_rq.lock);
+}
+
 /**
  * ipath_modify_qp - modify the attributes of a queue pair
  * @ibqp: the queue pair who's attributes we're modifying
@@ -368,13 +432,13 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		    int attr_mask)
 {
+	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_qp *qp = to_iqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&qp->r_rq.lock, flags);
-	spin_lock(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ?
 		attr->cur_qp_state : qp->state;
@@ -384,6 +448,19 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 			attr_mask))
 		goto inval;
 
+	if (attr_mask & IB_QP_AV)
+		if (attr->ah_attr.dlid == 0 ||
+		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
+			goto inval;
+
+	if (attr_mask & IB_QP_PKEY_INDEX)
+		if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
+			goto inval;
+
+	if (attr_mask & IB_QP_MIN_RNR_TIMER)
+		if (attr->min_rnr_timer > 31)
+			goto inval;
+
 	switch (new_state) {
 	case IB_QPS_RESET:
 		ipath_reset_qp(qp);
 		break;
@@ -398,13 +475,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	}
 
-	if (attr_mask & IB_QP_PKEY_INDEX) {
-		struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-		if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd))
-			goto inval;
+	if (attr_mask & IB_QP_PKEY_INDEX)
 		qp->s_pkey_index = attr->pkey_index;
-	}
 
 	if (attr_mask & IB_QP_DEST_QPN)
 		qp->remote_qpn = attr->dest_qp_num;
@@ -420,12 +492,8 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_ACCESS_FLAGS)
 		qp->qp_access_flags = attr->qp_access_flags;
 
-	if (attr_mask & IB_QP_AV) {
-		if (attr->ah_attr.dlid == 0 ||
-		    attr->ah_attr.dlid >= IPS_MULTICAST_LID_BASE)
-			goto inval;
+	if (attr_mask & IB_QP_AV)
 		qp->remote_ah_attr = attr->ah_attr;
-	}
 
 	if (attr_mask & IB_QP_PATH_MTU)
 		qp->path_mtu = attr->path_mtu;
@@ -440,38 +508,20 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
 	}
 
-	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-		if (attr->min_rnr_timer > 31)
-			goto inval;
-		qp->s_min_rnr_timer = attr->min_rnr_timer;
-	}
+	if (attr_mask & IB_QP_MIN_RNR_TIMER)
+		qp->r_min_rnr_timer = attr->min_rnr_timer;
 
 	if (attr_mask & IB_QP_QKEY)
 		qp->qkey = attr->qkey;
 
-	if (attr_mask & IB_QP_PKEY_INDEX)
-		qp->s_pkey_index = attr->pkey_index;
-
 	qp->state = new_state;
-	spin_unlock(&qp->s_lock);
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
-	/*
-	 * If QP1 changed to the RTS state, try to move to the link to INIT
-	 * even if it was ACTIVE so the SM will reinitialize the SMA's
-	 * state.
-	 */
-	if (qp->ibqp.qp_num == 1 && new_state == IB_QPS_RTS) {
-		struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-		ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
-	}
 	ret = 0;
 	goto bail;
 
 inval:
-	spin_unlock(&qp->s_lock);
-	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 	ret = -EINVAL;
 
 bail:
@@ -505,7 +555,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	attr->sq_draining = 0;
 	attr->max_rd_atomic = 1;
 	attr->max_dest_rd_atomic = 1;
-	attr->min_rnr_timer = qp->s_min_rnr_timer;
+	attr->min_rnr_timer = qp->r_min_rnr_timer;
 	attr->port_num = 1;
 	attr->timeout = 0;
 	attr->retry_cnt = qp->s_retry_cnt;
@@ -532,21 +582,17 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  * @qp: the queue pair to compute the AETH for
  *
  * Returns the AETH.
- *
- * The QP s_lock should be held.
  */
 __be32 ipath_compute_aeth(struct ipath_qp *qp)
 {
-	u32 aeth = atomic_read(&qp->msn) & IPS_MSN_MASK;
+	u32 aeth = qp->r_msn & IPATH_MSN_MASK;
 
-	if (qp->s_nak_state) {
-		aeth |= qp->s_nak_state << IPS_AETH_CREDIT_SHIFT;
-	} else if (qp->ibqp.srq) {
+	if (qp->ibqp.srq) {
 		/*
 		 * Shared receive queues don't generate credits.
 		 * Set the credit field to the invalid value.
 		 */
-		aeth |= IPS_AETH_CREDIT_INVAL << IPS_AETH_CREDIT_SHIFT;
+		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
 	} else {
 		u32 min, max, x;
 		u32 credits;
@@ -576,7 +622,7 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp)
 			else
 				min = x;
 		}
-		aeth |= x << IPS_AETH_CREDIT_SHIFT;
+		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
 	}
 	return cpu_to_be32(aeth);
 }
@@ -602,12 +648,22 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	size_t sz;
 	struct ib_qp *ret;
 
-	if (init_attr->cap.max_send_sge > 255 ||
-	    init_attr->cap.max_recv_sge > 255) {
+	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
+	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
+	if (init_attr->cap.max_send_sge +
+	    init_attr->cap.max_recv_sge +
+	    init_attr->cap.max_send_wr +
+	    init_attr->cap.max_recv_wr == 0) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_UC:
 	case IB_QPT_RC:
@@ -625,18 +681,26 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	case IB_QPT_GSI:
 		qp = kmalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp) {
+			vfree(swq);
 			ret = ERR_PTR(-ENOMEM);
 			goto bail;
 		}
-		qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
-		sz = sizeof(struct ipath_sge) *
-			init_attr->cap.max_recv_sge +
-			sizeof(struct ipath_rwqe);
-		qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
-		if (!qp->r_rq.wq) {
-			kfree(qp);
-			ret = ERR_PTR(-ENOMEM);
-			goto bail;
+		if (init_attr->srq) {
+			qp->r_rq.size = 0;
+			qp->r_rq.max_sge = 0;
+			qp->r_rq.wq = NULL;
+		} else {
+			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
+			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+			sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) +
+				sizeof(struct ipath_rwqe);
+			qp->r_rq.wq = vmalloc(qp->r_rq.size * sz);
+			if (!qp->r_rq.wq) {
+				kfree(qp);
+				vfree(swq);
+				ret = ERR_PTR(-ENOMEM);
+				goto bail;
+			}
 		}
 
 		/*
@@ -647,19 +711,14 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		spin_lock_init(&qp->r_rq.lock);
 		atomic_set(&qp->refcount, 0);
 		init_waitqueue_head(&qp->wait);
-		tasklet_init(&qp->s_task,
-			     init_attr->qp_type == IB_QPT_RC ?
-			     ipath_do_rc_send : ipath_do_uc_send,
+		tasklet_init(&qp->s_task, ipath_do_ruc_send,
 			     (unsigned long)qp);
-		qp->piowait.next = LIST_POISON1;
-		qp->piowait.prev = LIST_POISON2;
-		qp->timerwait.next = LIST_POISON1;
-		qp->timerwait.prev = LIST_POISON2;
+		INIT_LIST_HEAD(&qp->piowait);
+		INIT_LIST_HEAD(&qp->timerwait);
 		qp->state = IB_QPS_RESET;
 		qp->s_wq = swq;
 		qp->s_size = init_attr->cap.max_send_wr + 1;
 		qp->s_max_sge = init_attr->cap.max_send_sge;
-		qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
 		qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
 			1 << IPATH_S_SIGNAL_REQ_WR : 0;
 		dev = to_idev(ibpd->device);
@@ -675,7 +734,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 		ipath_reset_qp(qp);
 
 		/* Tell the core driver that the kernel SMA is present. */
-		if (qp->ibqp.qp_type == IB_QPT_SMI)
+		if (init_attr->qp_type == IB_QPT_SMI)
 			ipath_layer_set_verbs_flags(dev->dd,
 						    IPATH_VERBS_KERNEL_SMA);
 		break;
@@ -724,10 +783,10 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
 
 	/* Make sure the QP isn't on the timeout list. */
 	spin_lock_irqsave(&dev->pending_lock, flags);
-	if (qp->timerwait.next != LIST_POISON1)
-		list_del(&qp->timerwait);
-	if (qp->piowait.next != LIST_POISON1)
-		list_del(&qp->piowait);
+	if (!list_empty(&qp->timerwait))
+		list_del_init(&qp->timerwait);
+	if (!list_empty(&qp->piowait))
+		list_del_init(&qp->piowait);
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
 
 	/*
@@ -796,10 +855,10 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 
 	spin_lock(&dev->pending_lock);
 	/* XXX What if its already removed by the timeout code? */
-	if (qp->timerwait.next != LIST_POISON1)
-		list_del(&qp->timerwait);
-	if (qp->piowait.next != LIST_POISON1)
-		list_del(&qp->piowait);
+	if (!list_empty(&qp->timerwait))
+		list_del_init(&qp->timerwait);
+	if (!list_empty(&qp->piowait))
+		list_del_init(&qp->piowait);
 	spin_unlock(&dev->pending_lock);
 
 	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
@@ -820,65 +879,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
 	qp->state = IB_QPS_SQE;
 }
 
-/**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
- *
- * Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
- */
-
-void ipath_error_qp(struct ipath_qp *qp)
-{
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	struct ib_wc wc;
-
-	_VERBS_INFO("QP%d/%d in error state\n",
-		    qp->ibqp.qp_num, qp->remote_qpn);
-
-	spin_lock(&dev->pending_lock);
-	/* XXX What if its already removed by the timeout code? */
-	if (qp->timerwait.next != LIST_POISON1)
-		list_del(&qp->timerwait);
-	if (qp->piowait.next != LIST_POISON1)
-		list_del(&qp->piowait);
-	spin_unlock(&dev->pending_lock);
-
-	wc.status = IB_WC_WR_FLUSH_ERR;
-	wc.vendor_err = 0;
-	wc.byte_len = 0;
-	wc.imm_data = 0;
-	wc.qp_num = qp->ibqp.qp_num;
-	wc.src_qp = 0;
-	wc.wc_flags = 0;
-	wc.pkey_index = 0;
-	wc.slid = 0;
-	wc.sl = 0;
-	wc.dlid_path_bits = 0;
-	wc.port_num = 0;
-
-	while (qp->s_last != qp->s_head) {
-		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
-		wc.wr_id = wqe->wr.wr_id;
-		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-		if (++qp->s_last >= qp->s_size)
-			qp->s_last = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
-	}
-	qp->s_cur = qp->s_tail = qp->s_head;
-	qp->s_hdrwords = 0;
-	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
-	wc.opcode = IB_WC_RECV;
-	while (qp->r_rq.tail != qp->r_rq.head) {
-		wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
-		if (++qp->r_rq.tail >= qp->r_rq.size)
-			qp->r_rq.tail = 0;
-		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-	}
-}
-
 /**
  * ipath_get_credit - flush the send work queue of a QP
  * @qp: the qp who's send work queue to flush
@@ -888,18 +888,18 @@ void ipath_error_qp(struct ipath_qp *qp)
  */
 void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
 {
-	u32 credit = (aeth >> IPS_AETH_CREDIT_SHIFT) & IPS_AETH_CREDIT_MASK;
+	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
 
 	/*
 	 * If the credit is invalid, we can send
 	 * as many packets as we like.  Otherwise, we have to
 	 * honor the credit field.
 	 */
-	if (credit == IPS_AETH_CREDIT_INVAL) {
+	if (credit == IPATH_AETH_CREDIT_INVAL)
 		qp->s_lsn = (u32) -1;
-	} else if (qp->s_lsn != (u32) -1) {
+	else if (qp->s_lsn != (u32) -1) {
 		/* Compute new LSN (i.e., MSN + credit) */
-		credit = (aeth + credit_table[credit]) & IPS_MSN_MASK;
+		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
 		if (ipath_cmp24(credit, qp->s_lsn) > 0)
 			qp->s_lsn = credit;
 	}
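
Note on the list-handling conversion seen throughout this diff: the open-coded
LIST_POISON1/LIST_POISON2 bookkeeping is replaced by the standard
list_empty()/list_del_init() idiom, which leaves a deleted entry re-initialized
rather than poisoned, so the same "if not empty, unlink" test stays valid no
matter how many times an error or destroy path runs. Below is a minimal
userspace sketch of that idiom; the struct and helpers are a local stand-in
mirroring <linux/list.h> semantics, and the pending/timerwait names are
illustrative only, not driver code.

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <assert.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;	/* an empty list points at itself */
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);	/* unlike list_del(), the node stays usable */
}

int main(void)
{
	struct list_head pending, timerwait;	/* names are illustrative */

	/* What ipath_create_qp() now does instead of seeding LIST_POISON. */
	INIT_LIST_HEAD(&pending);
	INIT_LIST_HEAD(&timerwait);

	/* A QP waiting on the device's pending list... */
	list_add_tail(&timerwait, &pending);
	assert(!list_empty(&timerwait));

	/* ...and the removal pattern from ipath_error_qp()/ipath_destroy_qp(): */
	if (!list_empty(&timerwait))
		list_del_init(&timerwait);

	/* A second pass is harmless; the node reads as empty again. */
	if (!list_empty(&timerwait))
		list_del_init(&timerwait);

	assert(list_empty(&timerwait) && list_empty(&pending));
	return 0;
}

With list_del_init() every node is always in a well-defined state, so the
manual poison assignments removed from ipath_create_qp() above are no longer
needed and the emptiness check can be repeated safely from multiple teardown
paths.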