2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
38 #include <linux/init.h>
40 #include <rdma/ib_verbs.h>
41 #include <rdma/ib_cache.h>
42 #include <rdma/ib_pack.h>
44 #include "mthca_dev.h"
45 #include "mthca_cmd.h"
46 #include "mthca_memfree.h"
47 #include "mthca_wqe.h"
50 MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
51 MTHCA_ACK_REQ_FREQ = 10,
52 MTHCA_FLIGHT_LIMIT = 9,
53 MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */
54 MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */
55 MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */
59 MTHCA_QP_STATE_RST = 0,
60 MTHCA_QP_STATE_INIT = 1,
61 MTHCA_QP_STATE_RTR = 2,
62 MTHCA_QP_STATE_RTS = 3,
63 MTHCA_QP_STATE_SQE = 4,
64 MTHCA_QP_STATE_SQD = 5,
65 MTHCA_QP_STATE_ERR = 6,
66 MTHCA_QP_STATE_DRAINING = 7
78 MTHCA_QP_PM_MIGRATED = 0x3,
79 MTHCA_QP_PM_ARMED = 0x0,
80 MTHCA_QP_PM_REARM = 0x1
84 /* qp_context flags */
85 MTHCA_QP_BIT_DE = 1 << 8,
87 MTHCA_QP_BIT_SRE = 1 << 15,
88 MTHCA_QP_BIT_SWE = 1 << 14,
89 MTHCA_QP_BIT_SAE = 1 << 13,
90 MTHCA_QP_BIT_SIC = 1 << 4,
91 MTHCA_QP_BIT_SSC = 1 << 3,
93 MTHCA_QP_BIT_RRE = 1 << 15,
94 MTHCA_QP_BIT_RWE = 1 << 14,
95 MTHCA_QP_BIT_RAE = 1 << 13,
96 MTHCA_QP_BIT_RIC = 1 << 4,
97 MTHCA_QP_BIT_RSC = 1 << 3
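/*
 * The S* bits above are used in params1 (the send/requester side) and
 * the R* bits in params2 (the responder side), as the usage in
 * mthca_modify_qp() below shows.
 */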
100 struct mthca_qp_path {
109 __be32 sl_tclass_flowlabel;
111 } __attribute__((packed));
113 struct mthca_qp_context {
115 __be32 tavor_sched_queue; /* Reserved on Arbel */
117 u8 rq_size_stride; /* Reserved on Tavor */
118 u8 sq_size_stride; /* Reserved on Tavor */
119 u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */
124 struct mthca_qp_path pri_path;
125 struct mthca_qp_path alt_path;
132 __be32 next_send_psn;
134 __be32 snd_wqe_base_l; /* Next send WQE on Tavor */
135 __be32 snd_db_index; /* (debugging only entries) */
136 __be32 last_acked_psn;
139 __be32 rnr_nextrecvpsn;
142 __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */
143 __be32 rcv_db_index; /* (debugging only entries) */
147 __be16 rq_wqe_counter; /* reserved on Tavor */
148 __be16 sq_wqe_counter; /* reserved on Tavor */
150 } __attribute__((packed));
152 struct mthca_qp_param {
153 __be32 opt_param_mask;
155 struct mthca_qp_context context;
157 } __attribute__((packed));
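/*
 * opt_param_mask selects which optional context fields the MODIFY_QP
 * firmware command should actually apply; its bits are the
 * MTHCA_QP_OPTPAR_* values defined next.
 */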
160 MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
161 MTHCA_QP_OPTPAR_RRE = 1 << 1,
162 MTHCA_QP_OPTPAR_RAE = 1 << 2,
163 MTHCA_QP_OPTPAR_RWE = 1 << 3,
164 MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4,
165 MTHCA_QP_OPTPAR_Q_KEY = 1 << 5,
166 MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
167 MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
168 MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8,
169 MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9,
170 MTHCA_QP_OPTPAR_PM_STATE = 1 << 10,
171 MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11,
172 MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12,
173 MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13,
174 MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
175 MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15,
176 MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16
179 static const u8 mthca_opcode[] = {
180 [IB_WR_SEND] = MTHCA_OPCODE_SEND,
181 [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM,
182 [IB_WR_RDMA_WRITE] = MTHCA_OPCODE_RDMA_WRITE,
183 [IB_WR_RDMA_WRITE_WITH_IMM] = MTHCA_OPCODE_RDMA_WRITE_IMM,
184 [IB_WR_RDMA_READ] = MTHCA_OPCODE_RDMA_READ,
185 [IB_WR_ATOMIC_CMP_AND_SWP] = MTHCA_OPCODE_ATOMIC_CS,
186 [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
189 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
191 return qp->qpn >= dev->qp_table.sqp_start &&
192 qp->qpn <= dev->qp_table.sqp_start + 3;
195 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
197 return qp->qpn >= dev->qp_table.sqp_start &&
198 qp->qpn <= dev->qp_table.sqp_start + 1;
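/*
 * The four QPs starting at sqp_start are the special QPs: QP0 for
 * ports 1 and 2 first, then QP1 for ports 1 and 2 (see the mqpn
 * computation in mthca_alloc_sqp()).  is_sqp() matches all four,
 * is_qp0() only the two QP0s.
 */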
201 static void *get_recv_wqe(struct mthca_qp *qp, int n)
204 return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
206 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
207 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
210 static void *get_send_wqe(struct mthca_qp *qp, int n)
213 return qp->queue.direct.buf + qp->send_wqe_offset +
214 (n << qp->sq.wqe_shift);
216 return qp->queue.page_list[(qp->send_wqe_offset +
217 (n << qp->sq.wqe_shift)) >>
219 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
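/*
 * Both WQE lookups turn an index into a buffer address: for a small
 * "direct" queue the WQEs live in one contiguous buffer, otherwise the
 * byte offset (index << wqe_shift, plus send_wqe_offset for the SQ) is
 * split into a page_list entry and an offset within that page.
 */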
223 static void mthca_wq_init(struct mthca_wq *wq)
225 spin_lock_init(&wq->lock);
227 wq->last_comp = wq->max - 1;
233 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
234 enum ib_event_type event_type)
237 struct ib_event event;
239 spin_lock(&dev->qp_table.lock);
240 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
242 atomic_inc(&qp->refcount);
243 spin_unlock(&dev->qp_table.lock);
246 mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
250 event.device = &dev->ib_dev;
251 event.event = event_type;
252 event.element.qp = &qp->ibqp;
253 if (qp->ibqp.event_handler)
254 qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
256 if (atomic_dec_and_test(&qp->refcount))
260 static int to_mthca_state(enum ib_qp_state ib_state)
263 case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
264 case IB_QPS_INIT: return MTHCA_QP_STATE_INIT;
265 case IB_QPS_RTR: return MTHCA_QP_STATE_RTR;
266 case IB_QPS_RTS: return MTHCA_QP_STATE_RTS;
267 case IB_QPS_SQD: return MTHCA_QP_STATE_SQD;
268 case IB_QPS_SQE: return MTHCA_QP_STATE_SQE;
269 case IB_QPS_ERR: return MTHCA_QP_STATE_ERR;
274 enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
276 static int to_mthca_st(int transport)
279 case RC: return MTHCA_QP_ST_RC;
280 case UC: return MTHCA_QP_ST_UC;
281 case UD: return MTHCA_QP_ST_UD;
282 case RD: return MTHCA_QP_ST_RD;
283 case MLX: return MTHCA_QP_ST_MLX;
288 static const struct {
290 u32 req_param[NUM_TRANS];
291 u32 opt_param[NUM_TRANS];
292 } state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
294 [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
295 [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
297 .trans = MTHCA_TRANS_RST2INIT,
299 [UD] = (IB_QP_PKEY_INDEX |
302 [UC] = (IB_QP_PKEY_INDEX |
305 [RC] = (IB_QP_PKEY_INDEX |
308 [MLX] = (IB_QP_PKEY_INDEX |
311 /* bug-for-bug compatibility with VAPI: */
318 [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
319 [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
321 .trans = MTHCA_TRANS_INIT2INIT,
323 [UD] = (IB_QP_PKEY_INDEX |
326 [UC] = (IB_QP_PKEY_INDEX |
329 [RC] = (IB_QP_PKEY_INDEX |
332 [MLX] = (IB_QP_PKEY_INDEX |
337 .trans = MTHCA_TRANS_INIT2RTR,
343 IB_QP_MAX_DEST_RD_ATOMIC),
348 IB_QP_MAX_DEST_RD_ATOMIC |
349 IB_QP_MIN_RNR_TIMER),
352 [UD] = (IB_QP_PKEY_INDEX |
354 [UC] = (IB_QP_ALT_PATH |
357 [RC] = (IB_QP_ALT_PATH |
360 [MLX] = (IB_QP_PKEY_INDEX |
366 [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
367 [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
369 .trans = MTHCA_TRANS_RTR2RTS,
372 [UC] = (IB_QP_SQ_PSN |
373 IB_QP_MAX_QP_RD_ATOMIC),
374 [RC] = (IB_QP_TIMEOUT |
378 IB_QP_MAX_QP_RD_ATOMIC),
379 [MLX] = IB_QP_SQ_PSN,
382 [UD] = (IB_QP_CUR_STATE |
384 [UC] = (IB_QP_CUR_STATE |
388 IB_QP_PATH_MIG_STATE),
389 [RC] = (IB_QP_CUR_STATE |
393 IB_QP_MIN_RNR_TIMER |
394 IB_QP_PATH_MIG_STATE),
395 [MLX] = (IB_QP_CUR_STATE |
401 [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
402 [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
404 .trans = MTHCA_TRANS_RTS2RTS,
406 [UD] = (IB_QP_CUR_STATE |
408 [UC] = (IB_QP_ACCESS_FLAGS |
410 IB_QP_PATH_MIG_STATE),
411 [RC] = (IB_QP_ACCESS_FLAGS |
413 IB_QP_PATH_MIG_STATE |
414 IB_QP_MIN_RNR_TIMER),
415 [MLX] = (IB_QP_CUR_STATE |
420 .trans = MTHCA_TRANS_RTS2SQD,
424 [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
425 [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
427 .trans = MTHCA_TRANS_SQD2RTS,
429 [UD] = (IB_QP_CUR_STATE |
431 [UC] = (IB_QP_CUR_STATE |
434 IB_QP_PATH_MIG_STATE),
435 [RC] = (IB_QP_CUR_STATE |
438 IB_QP_MIN_RNR_TIMER |
439 IB_QP_PATH_MIG_STATE),
440 [MLX] = (IB_QP_CUR_STATE |
445 .trans = MTHCA_TRANS_SQD2SQD,
447 [UD] = (IB_QP_PKEY_INDEX |
450 IB_QP_MAX_QP_RD_ATOMIC |
451 IB_QP_MAX_DEST_RD_ATOMIC |
456 IB_QP_PATH_MIG_STATE),
461 IB_QP_MAX_QP_RD_ATOMIC |
462 IB_QP_MAX_DEST_RD_ATOMIC |
467 IB_QP_MIN_RNR_TIMER |
468 IB_QP_PATH_MIG_STATE),
469 [MLX] = (IB_QP_PKEY_INDEX |
475 [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
476 [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR },
478 .trans = MTHCA_TRANS_SQERR2RTS,
480 [UD] = (IB_QP_CUR_STATE |
482 [UC] = (IB_QP_CUR_STATE),
483 [RC] = (IB_QP_CUR_STATE |
484 IB_QP_MIN_RNR_TIMER),
485 [MLX] = (IB_QP_CUR_STATE |
491 [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
492 [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }
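/*
 * state_table[cur][new] gives the firmware transition opcode plus, per
 * transport, the attribute bits that are required (req_param) and
 * permitted (opt_param) for that transition; mthca_modify_qp() checks
 * attr_mask against these before issuing MODIFY_QP.
 */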
496 static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
499 if (attr_mask & IB_QP_PKEY_INDEX)
500 sqp->pkey_index = attr->pkey_index;
501 if (attr_mask & IB_QP_QKEY)
502 sqp->qkey = attr->qkey;
503 if (attr_mask & IB_QP_SQ_PSN)
504 sqp->send_psn = attr->sq_psn;
507 static void init_port(struct mthca_dev *dev, int port)
511 struct mthca_init_ib_param param;
513 memset(¶m, 0, sizeof param);
515 param.port_width = dev->limits.port_width_cap;
516 param.vl_cap = dev->limits.vl_cap;
517 param.mtu_cap = dev->limits.mtu_cap;
518 param.gid_cap = dev->limits.gid_table_len;
519 param.pkey_cap = dev->limits.pkey_table_len;
521 err = mthca_INIT_IB(dev, &param, port, &status);
523 mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
525 mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
528 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
530 struct mthca_dev *dev = to_mdev(ibqp->device);
531 struct mthca_qp *qp = to_mqp(ibqp);
532 enum ib_qp_state cur_state, new_state;
533 struct mthca_mailbox *mailbox;
534 struct mthca_qp_param *qp_param;
535 struct mthca_qp_context *qp_context;
536 u32 req_param, opt_param;
540 if (attr_mask & IB_QP_CUR_STATE) {
541 if (attr->cur_qp_state != IB_QPS_RTR &&
542 attr->cur_qp_state != IB_QPS_RTS &&
543 attr->cur_qp_state != IB_QPS_SQD &&
544 attr->cur_qp_state != IB_QPS_SQE)
547 cur_state = attr->cur_qp_state;
549 spin_lock_irq(&qp->sq.lock);
550 spin_lock(&qp->rq.lock);
551 cur_state = qp->state;
552 spin_unlock(&qp->rq.lock);
553 spin_unlock_irq(&qp->sq.lock);
556 if (attr_mask & IB_QP_STATE) {
557 if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
559 new_state = attr->qp_state;
561 new_state = cur_state;
563 if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
564 mthca_dbg(dev, "Illegal QP transition "
565 "%d->%d\n", cur_state, new_state);
569 req_param = state_table[cur_state][new_state].req_param[qp->transport];
570 opt_param = state_table[cur_state][new_state].opt_param[qp->transport];
572 if ((req_param & attr_mask) != req_param) {
573 mthca_dbg(dev, "QP transition "
574 "%d->%d missing req attr 0x%08x\n",
575 cur_state, new_state,
576 req_param & ~attr_mask);
580 if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
581 mthca_dbg(dev, "QP transition (transport %d) "
582 "%d->%d has extra attr 0x%08x\n",
584 cur_state, new_state,
585 attr_mask & ~(req_param | opt_param |
590 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
592 return PTR_ERR(mailbox);
593 qp_param = mailbox->buf;
594 qp_context = &qp_param->context;
595 memset(qp_param, 0, sizeof *qp_param);
597 qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
598 (to_mthca_st(qp->transport) << 16));
599 qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
600 if (!(attr_mask & IB_QP_PATH_MIG_STATE))
601 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
603 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
604 switch (attr->path_mig_state) {
605 case IB_MIG_MIGRATED:
606 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
609 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
612 qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
617 /* leave tavor_sched_queue as 0 */
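/*
 * mtu_msgmax packs the path MTU code into the high bits and log2 of
 * the maximum message size into the low 5 bits: MLX and UD QPs are
 * fixed at a 2K MTU and 2^11-byte messages, everything else gets the
 * requested MTU and a 2^31-byte maximum.
 */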
619 if (qp->transport == MLX || qp->transport == UD)
620 qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
621 else if (attr_mask & IB_QP_PATH_MTU)
622 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
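/*
 * On mem-free HCAs the queue shape is part of the context: log2 of the
 * number of WQEs goes in the upper bits of *_size_stride, and
 * (wqe_shift - 4), i.e. log2 of the WQE stride in 16-byte units, goes
 * in the low bits.
 */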
624 if (mthca_is_memfree(dev)) {
626 qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
627 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
630 qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
631 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
634 /* leave arbel_sched_queue as 0 */
636 if (qp->ibqp.uobject)
637 qp_context->usr_page =
638 cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
640 qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
641 qp_context->local_qpn = cpu_to_be32(qp->qpn);
642 if (attr_mask & IB_QP_DEST_QPN) {
643 qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
646 if (qp->transport == MLX)
647 qp_context->pri_path.port_pkey |=
648 cpu_to_be32(to_msqp(qp)->port << 24);
650 if (attr_mask & IB_QP_PORT) {
651 qp_context->pri_path.port_pkey |=
652 cpu_to_be32(attr->port_num << 24);
653 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
657 if (attr_mask & IB_QP_PKEY_INDEX) {
658 qp_context->pri_path.port_pkey |=
659 cpu_to_be32(attr->pkey_index);
660 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
663 if (attr_mask & IB_QP_RNR_RETRY) {
664 qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
665 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
668 if (attr_mask & IB_QP_AV) {
669 qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f;
670 qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid);
671 qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
672 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
673 qp_context->pri_path.g_mylmc |= 1 << 7;
674 qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
675 qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
676 qp_context->pri_path.sl_tclass_flowlabel =
677 cpu_to_be32((attr->ah_attr.sl << 28) |
678 (attr->ah_attr.grh.traffic_class << 20) |
679 (attr->ah_attr.grh.flow_label));
680 memcpy(qp_context->pri_path.rgid,
681 attr->ah_attr.grh.dgid.raw, 16);
683 qp_context->pri_path.sl_tclass_flowlabel =
684 cpu_to_be32(attr->ah_attr.sl << 28);
686 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
689 if (attr_mask & IB_QP_TIMEOUT) {
690 qp_context->pri_path.ackto = attr->timeout;
691 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
697 qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
698 /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
699 qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
700 qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
701 (MTHCA_FLIGHT_LIMIT << 24) |
705 if (qp->sq_policy == IB_SIGNAL_ALL_WR)
706 qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
707 if (attr_mask & IB_QP_RETRY_CNT) {
708 qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
709 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
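/*
 * The hardware wants the maximum number of outstanding RDMA
 * reads/atomics this QP may initiate as a log2 value, so max_rd_atomic
 * is converted with ffs() - 1 (0 stays 0) and clamped before being
 * merged into params1.
 */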
712 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
713 qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
714 ffs(attr->max_rd_atomic) - 1 : 0,
716 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
719 if (attr_mask & IB_QP_SQ_PSN)
720 qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
721 qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);
723 if (mthca_is_memfree(dev)) {
724 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
725 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);
728 if (attr_mask & IB_QP_ACCESS_FLAGS) {
730 * Only enable RDMA/atomics if we have responder
731 * resources set to a non-zero value.
733 if (qp->resp_depth) {
734 qp_context->params2 |=
735 cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
736 MTHCA_QP_BIT_RWE : 0);
737 qp_context->params2 |=
738 cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
739 MTHCA_QP_BIT_RRE : 0);
740 qp_context->params2 |=
741 cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
742 MTHCA_QP_BIT_RAE : 0);
745 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
746 MTHCA_QP_OPTPAR_RRE |
747 MTHCA_QP_OPTPAR_RAE);
749 qp->atomic_rd_en = attr->qp_access_flags;
752 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
755 if (qp->resp_depth && !attr->max_dest_rd_atomic) {
757 * Lowering our responder resources to zero.
758 * Turn off RDMA/atomics as responder.
759 * (RWE/RRE/RAE in params2 already zero)
761 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
762 MTHCA_QP_OPTPAR_RRE |
763 MTHCA_QP_OPTPAR_RAE);
766 if (!qp->resp_depth && attr->max_dest_rd_atomic) {
768 * Increasing our responder resources from
769 * zero. Turn on RDMA/atomics as appropriate.
771 qp_context->params2 |=
772 cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
773 MTHCA_QP_BIT_RWE : 0);
774 qp_context->params2 |=
775 cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
776 MTHCA_QP_BIT_RRE : 0);
777 qp_context->params2 |=
778 cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
779 MTHCA_QP_BIT_RAE : 0);
781 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
782 MTHCA_QP_OPTPAR_RRE |
783 MTHCA_QP_OPTPAR_RAE);
786 for (rra_max = 0;
787 1 << rra_max < attr->max_dest_rd_atomic &&
788 rra_max < dev->qp_table.rdb_shift;
789 ++rra_max)
790 ; /* nothing */
792 qp_context->params2 |= cpu_to_be32(rra_max << 21);
793 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
795 qp->resp_depth = attr->max_dest_rd_atomic;
798 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
801 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
803 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
804 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
805 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
807 if (attr_mask & IB_QP_RQ_PSN)
808 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
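/*
 * Each QP owns a block of 2^rdb_shift RDB (responder RDMA) entries;
 * ra_buff_indx points the QP at its slice of that array, starting at
 * rdb_base.
 */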
810 qp_context->ra_buff_indx =
811 cpu_to_be32(dev->qp_table.rdb_base +
812 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
813 dev->qp_table.rdb_shift));
815 qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);
817 if (mthca_is_memfree(dev))
818 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);
820 if (attr_mask & IB_QP_QKEY) {
821 qp_context->qkey = cpu_to_be32(attr->qkey);
822 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
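/*
 * For a QP attached to an SRQ, bit 24 of srqn flags the attachment and
 * the low 24 bits carry the SRQ number.
 */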
826 qp_context->srqn = cpu_to_be32(1 << 24 |
827 to_msrq(ibqp->srq)->srqn);
829 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
830 qp->qpn, 0, mailbox, 0, &status);
832 mthca_warn(dev, "modify QP %d returned status %02x.\n",
833 state_table[cur_state][new_state].trans, status);
838 qp->state = new_state;
840 mthca_free_mailbox(dev, mailbox);
843 store_attrs(to_msqp(qp), attr, attr_mask);
846 * If we moved QP0 to RTR, bring the IB link up; if we moved
847 * QP0 to RESET or ERROR, bring the link back down.
849 if (is_qp0(dev, qp)) {
850 if (cur_state != IB_QPS_RTR &&
851 new_state == IB_QPS_RTR)
852 init_port(dev, to_msqp(qp)->port);
854 if (cur_state != IB_QPS_RESET &&
855 cur_state != IB_QPS_ERR &&
856 (new_state == IB_QPS_RESET ||
857 new_state == IB_QPS_ERR))
858 mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
862 * If we moved a kernel QP to RESET, clean up all old CQ
863 * entries and reinitialize the QP.
865 if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
866 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
867 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
868 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
869 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
870 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
872 mthca_wq_init(&qp->sq);
873 mthca_wq_init(&qp->rq);
875 if (mthca_is_memfree(dev)) {
885 * Allocate and register buffer for WQEs. qp->rq.max, sq.max,
886 * rq.max_gs and sq.max_gs must all be assigned.
887 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
888 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
891 static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
898 size = sizeof (struct mthca_next_seg) +
899 qp->rq.max_gs * sizeof (struct mthca_data_seg);
901 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
905 size = sizeof (struct mthca_next_seg) +
906 qp->sq.max_gs * sizeof (struct mthca_data_seg);
907 switch (qp->transport) {
909 size += 2 * sizeof (struct mthca_data_seg);
912 if (mthca_is_memfree(dev))
913 size += sizeof (struct mthca_arbel_ud_seg);
915 size += sizeof (struct mthca_tavor_ud_seg);
918 /* bind seg is as big as atomic + raddr segs */
919 size += sizeof (struct mthca_bind_seg);
922 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
926 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
927 1 << qp->sq.wqe_shift);
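/*
 * The RQ and SQ share one buffer: receive WQEs come first, and
 * send_wqe_offset is the size of the RQ region rounded up to the SQ
 * WQE size so that send WQEs stay naturally aligned.
 */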
930 * If this is a userspace QP, we don't actually have to
931 * allocate anything. All we need is to calculate the WQE
932 * sizes and the send_wqe_offset, so we're done now.
934 if (pd->ibpd.uobject)
937 size = PAGE_ALIGN(qp->send_wqe_offset +
938 (qp->sq.max << qp->sq.wqe_shift));
940 qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
945 err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
946 &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
957 static void mthca_free_wqe_buf(struct mthca_dev *dev,
960 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
961 (qp->sq.max << qp->sq.wqe_shift)),
962 &qp->queue, qp->is_direct, &qp->mr);
966 static int mthca_map_memfree(struct mthca_dev *dev,
971 if (mthca_is_memfree(dev)) {
972 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
976 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
980 ret = mthca_table_get(dev, dev->qp_table.rdb_table,
981 qp->qpn << dev->qp_table.rdb_shift);
990 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
993 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
998 static void mthca_unmap_memfree(struct mthca_dev *dev,
1001 mthca_table_put(dev, dev->qp_table.rdb_table,
1002 qp->qpn << dev->qp_table.rdb_shift);
1003 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
1004 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
1007 static int mthca_alloc_memfree(struct mthca_dev *dev,
1008 struct mthca_qp *qp)
1012 if (mthca_is_memfree(dev)) {
1013 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
1014 qp->qpn, &qp->rq.db);
1015 if (qp->rq.db_index < 0)
1018 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
1019 qp->qpn, &qp->sq.db);
1020 if (qp->sq.db_index < 0)
1021 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1027 static void mthca_free_memfree(struct mthca_dev *dev,
1028 struct mthca_qp *qp)
1030 if (mthca_is_memfree(dev)) {
1031 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
1032 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1036 static int mthca_alloc_qp_common(struct mthca_dev *dev,
1037 struct mthca_pd *pd,
1038 struct mthca_cq *send_cq,
1039 struct mthca_cq *recv_cq,
1040 enum ib_sig_type send_policy,
1041 struct mthca_qp *qp)
1046 atomic_set(&qp->refcount, 1);
1047 qp->state = IB_QPS_RESET;
1048 qp->atomic_rd_en = 0;
1050 qp->sq_policy = send_policy;
1051 mthca_wq_init(&qp->sq);
1052 mthca_wq_init(&qp->rq);
1054 ret = mthca_map_memfree(dev, qp);
1058 ret = mthca_alloc_wqe_buf(dev, pd, qp);
1060 mthca_unmap_memfree(dev, qp);
1065 * If this is a userspace QP, we're done now. The doorbells
1066 * will be allocated and buffers will be initialized in userspace.
1069 if (pd->ibpd.uobject)
1072 ret = mthca_alloc_memfree(dev, qp);
1074 mthca_free_wqe_buf(dev, qp);
1075 mthca_unmap_memfree(dev, qp);
1079 if (mthca_is_memfree(dev)) {
1080 struct mthca_next_seg *next;
1081 struct mthca_data_seg *scatter;
1082 int size = (sizeof (struct mthca_next_seg) +
1083 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
1085 for (i = 0; i < qp->rq.max; ++i) {
1086 next = get_recv_wqe(qp, i);
1087 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
1089 next->ee_nds = cpu_to_be32(size);
1091 for (scatter = (void *) (next + 1);
1092 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
1094 scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
1097 for (i = 0; i < qp->sq.max; ++i) {
1098 next = get_send_wqe(qp, i);
1099 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
1101 qp->send_wqe_offset);
1108 static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
1109 struct mthca_qp *qp)
1111 /* Sanity check QP size before proceeding */
1112 if (cap->max_send_wr > 65536 || cap->max_recv_wr > 65536 ||
1113 cap->max_send_sge > 64 || cap->max_recv_sge > 64)
1116 if (mthca_is_memfree(dev)) {
1117 qp->rq.max = cap->max_recv_wr ?
1118 roundup_pow_of_two(cap->max_recv_wr) : 0;
1119 qp->sq.max = cap->max_send_wr ?
1120 roundup_pow_of_two(cap->max_send_wr) : 0;
1122 qp->rq.max = cap->max_recv_wr;
1123 qp->sq.max = cap->max_send_wr;
1126 qp->rq.max_gs = cap->max_recv_sge;
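/*
 * sq.max_gs must cover either the requested S/G entries or the
 * worst-case inline data: max_inline_data plus its 4-byte inline
 * header, rounded up to 16-byte chunks and expressed as an equivalent
 * number of data segments.
 */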
1127 qp->sq.max_gs = max_t(int, cap->max_send_sge,
1128 ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
1129 MTHCA_INLINE_CHUNK_SIZE) /
1130 sizeof (struct mthca_data_seg));
1133 * For MLX transport we need 2 extra S/G entries:
1134 * one for the header and one for the checksum at the end
1136 if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
1137 qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
1143 int mthca_alloc_qp(struct mthca_dev *dev,
1144 struct mthca_pd *pd,
1145 struct mthca_cq *send_cq,
1146 struct mthca_cq *recv_cq,
1147 enum ib_qp_type type,
1148 enum ib_sig_type send_policy,
1149 struct ib_qp_cap *cap,
1150 struct mthca_qp *qp)
1154 err = mthca_set_qp_size(dev, cap, qp);
1159 case IB_QPT_RC: qp->transport = RC; break;
1160 case IB_QPT_UC: qp->transport = UC; break;
1161 case IB_QPT_UD: qp->transport = UD; break;
1162 default: return -EINVAL;
1165 qp->qpn = mthca_alloc(&dev->qp_table.alloc);
1169 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
1172 mthca_free(&dev->qp_table.alloc, qp->qpn);
1176 spin_lock_irq(&dev->qp_table.lock);
1177 mthca_array_set(&dev->qp_table.qp,
1178 qp->qpn & (dev->limits.num_qps - 1), qp);
1179 spin_unlock_irq(&dev->qp_table.lock);
1184 int mthca_alloc_sqp(struct mthca_dev *dev,
1185 struct mthca_pd *pd,
1186 struct mthca_cq *send_cq,
1187 struct mthca_cq *recv_cq,
1188 enum ib_sig_type send_policy,
1189 struct ib_qp_cap *cap,
1192 struct mthca_sqp *sqp)
1194 u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
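/*
 * The special QP numbers are laid out from sqp_start as QP0/port 1,
 * QP0/port 2, QP1/port 1, QP1/port 2, hence qpn * 2 + port - 1
 * (callers are expected to pass qpn == 0 for the SMI QP and 1 for the
 * GSI QP).
 */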
1197 err = mthca_set_qp_size(dev, cap, &sqp->qp);
1201 sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
1202 sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
1203 &sqp->header_dma, GFP_KERNEL);
1204 if (!sqp->header_buf)
1207 spin_lock_irq(&dev->qp_table.lock);
1208 if (mthca_array_get(&dev->qp_table.qp, mqpn))
1211 mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
1212 spin_unlock_irq(&dev->qp_table.lock);
1219 sqp->qp.transport = MLX;
1221 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
1222 send_policy, &sqp->qp);
1226 atomic_inc(&pd->sqp_count);
1232 * Lock CQs here, so that CQ polling code can do QP lookup
1233 * without taking a lock.
1235 spin_lock_irq(&send_cq->lock);
1236 if (send_cq != recv_cq)
1237 spin_lock(&recv_cq->lock);
1239 spin_lock(&dev->qp_table.lock);
1240 mthca_array_clear(&dev->qp_table.qp, mqpn);
1241 spin_unlock(&dev->qp_table.lock);
1243 if (send_cq != recv_cq)
1244 spin_unlock(&recv_cq->lock);
1245 spin_unlock_irq(&send_cq->lock);
1248 dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
1249 sqp->header_buf, sqp->header_dma);
1254 void mthca_free_qp(struct mthca_dev *dev,
1255 struct mthca_qp *qp)
1258 struct mthca_cq *send_cq;
1259 struct mthca_cq *recv_cq;
1261 send_cq = to_mcq(qp->ibqp.send_cq);
1262 recv_cq = to_mcq(qp->ibqp.recv_cq);
1265 * Lock CQs here, so that CQ polling code can do QP lookup
1266 * without taking a lock.
1268 spin_lock_irq(&send_cq->lock);
1269 if (send_cq != recv_cq)
1270 spin_lock(&recv_cq->lock);
1272 spin_lock(&dev->qp_table.lock);
1273 mthca_array_clear(&dev->qp_table.qp,
1274 qp->qpn & (dev->limits.num_qps - 1));
1275 spin_unlock(&dev->qp_table.lock);
1277 if (send_cq != recv_cq)
1278 spin_unlock(&recv_cq->lock);
1279 spin_unlock_irq(&send_cq->lock);
1281 atomic_dec(&qp->refcount);
1282 wait_event(qp->wait, !atomic_read(&qp->refcount));
1284 if (qp->state != IB_QPS_RESET)
1285 mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
1288 * If this is a userspace QP, the buffers, MR, CQs and so on
1289 * will be cleaned up in userspace, so all we have to do is
1290 * unref the mem-free tables and free the QPN in our table.
1292 if (!qp->ibqp.uobject) {
1293 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
1294 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1295 if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
1296 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
1297 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1299 mthca_free_memfree(dev, qp);
1300 mthca_free_wqe_buf(dev, qp);
1303 mthca_unmap_memfree(dev, qp);
1305 if (is_sqp(dev, qp)) {
1306 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
1307 dma_free_coherent(&dev->pdev->dev,
1308 to_msqp(qp)->header_buf_size,
1309 to_msqp(qp)->header_buf,
1310 to_msqp(qp)->header_dma);
1312 mthca_free(&dev->qp_table.alloc, qp->qpn);
1315 /* Create UD header for an MLX send and build a data segment for it */
1316 static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
1317 int ind, struct ib_send_wr *wr,
1318 struct mthca_mlx_seg *mlx,
1319 struct mthca_data_seg *data)
1325 ib_ud_header_init(256, /* assume a MAD */
1326 sqp->ud_header.grh_present,
1329 err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
1332 mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
1333 mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1334 (sqp->ud_header.lrh.destination_lid ==
1335 IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
1336 (sqp->ud_header.lrh.service_level << 8));
1337 mlx->rlid = sqp->ud_header.lrh.destination_lid;
1340 switch (wr->opcode) {
1342 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1343 sqp->ud_header.immediate_present = 0;
1345 case IB_WR_SEND_WITH_IMM:
1346 sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1347 sqp->ud_header.immediate_present = 1;
1348 sqp->ud_header.immediate_data = wr->imm_data;
1354 sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
1355 if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1356 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1357 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
1358 if (!sqp->qp.ibqp.qp_num)
1359 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
1360 sqp->pkey_index, &pkey);
1362 ib_get_cached_pkey(&dev->ib_dev, sqp->port,
1363 wr->wr.ud.pkey_index, &pkey);
1364 sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1365 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
1366 sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
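/*
 * Per the usual IB Q_Key convention, a work request whose Q_Key has
 * the high bit set means "use the Q_Key stored on this QP" rather than
 * the value carried in the WR.
 */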
1367 sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
1368 sqp->qkey : wr->wr.ud.remote_qkey);
1369 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);
1371 header_size = ib_ud_header_pack(&sqp->ud_header,
1373 ind * MTHCA_UD_HEADER_SIZE);
1375 data->byte_count = cpu_to_be32(header_size);
1376 data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
1377 data->addr = cpu_to_be64(sqp->header_dma +
1378 ind * MTHCA_UD_HEADER_SIZE);
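/*
 * Check whether posting nreq more work requests would overflow the
 * work queue.  head and tail can be advanced concurrently by
 * completion processing, so the slow path re-reads them under the CQ
 * lock.
 */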
1383 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
1384 struct ib_cq *ib_cq)
1387 struct mthca_cq *cq;
1389 cur = wq->head - wq->tail;
1390 if (likely(cur + nreq < wq->max))
1391 return 0;
1393 cq = to_mcq(ib_cq);
1394 spin_lock(&cq->lock);
1395 cur = wq->head - wq->tail;
1396 spin_unlock(&cq->lock);
1398 return cur + nreq >= wq->max;
1401 int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1402 struct ib_send_wr **bad_wr)
1404 struct mthca_dev *dev = to_mdev(ibqp->device);
1405 struct mthca_qp *qp = to_mqp(ibqp);
1408 unsigned long flags;
1418 spin_lock_irqsave(&qp->sq.lock, flags);
1420 /* XXX check that state is OK to post send */
1422 ind = qp->sq.next_ind;
1424 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1425 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1426 mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1427 " %d max, %d nreq)\n", qp->qpn,
1428 qp->sq.head, qp->sq.tail,
1435 wqe = get_send_wqe(qp, ind);
1436 prev_wqe = qp->sq.last;
1439 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1440 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
1441 ((struct mthca_next_seg *) wqe)->flags =
1442 ((wr->send_flags & IB_SEND_SIGNALED) ?
1443 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1444 ((wr->send_flags & IB_SEND_SOLICITED) ?
1445 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
1447 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1448 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1449 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
1451 wqe += sizeof (struct mthca_next_seg);
1452 size = sizeof (struct mthca_next_seg) / 16;
1454 switch (qp->transport) {
1456 switch (wr->opcode) {
1457 case IB_WR_ATOMIC_CMP_AND_SWP:
1458 case IB_WR_ATOMIC_FETCH_AND_ADD:
1459 ((struct mthca_raddr_seg *) wqe)->raddr =
1460 cpu_to_be64(wr->wr.atomic.remote_addr);
1461 ((struct mthca_raddr_seg *) wqe)->rkey =
1462 cpu_to_be32(wr->wr.atomic.rkey);
1463 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1465 wqe += sizeof (struct mthca_raddr_seg);
1467 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1468 ((struct mthca_atomic_seg *) wqe)->swap_add =
1469 cpu_to_be64(wr->wr.atomic.swap);
1470 ((struct mthca_atomic_seg *) wqe)->compare =
1471 cpu_to_be64(wr->wr.atomic.compare_add);
1473 ((struct mthca_atomic_seg *) wqe)->swap_add =
1474 cpu_to_be64(wr->wr.atomic.compare_add);
1475 ((struct mthca_atomic_seg *) wqe)->compare = 0;
1478 wqe += sizeof (struct mthca_atomic_seg);
1479 size += sizeof (struct mthca_raddr_seg) / 16 +
1480 sizeof (struct mthca_atomic_seg);
1483 case IB_WR_RDMA_WRITE:
1484 case IB_WR_RDMA_WRITE_WITH_IMM:
1485 case IB_WR_RDMA_READ:
1486 ((struct mthca_raddr_seg *) wqe)->raddr =
1487 cpu_to_be64(wr->wr.rdma.remote_addr);
1488 ((struct mthca_raddr_seg *) wqe)->rkey =
1489 cpu_to_be32(wr->wr.rdma.rkey);
1490 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1491 wqe += sizeof (struct mthca_raddr_seg);
1492 size += sizeof (struct mthca_raddr_seg) / 16;
1496 /* No extra segments required for sends */
1503 switch (wr->opcode) {
1504 case IB_WR_RDMA_WRITE:
1505 case IB_WR_RDMA_WRITE_WITH_IMM:
1506 ((struct mthca_raddr_seg *) wqe)->raddr =
1507 cpu_to_be64(wr->wr.rdma.remote_addr);
1508 ((struct mthca_raddr_seg *) wqe)->rkey =
1509 cpu_to_be32(wr->wr.rdma.rkey);
1510 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1511 wqe += sizeof (struct mthca_raddr_seg);
1512 size += sizeof (struct mthca_raddr_seg) / 16;
1516 /* No extra segments required for sends */
1523 ((struct mthca_tavor_ud_seg *) wqe)->lkey =
1524 cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
1525 ((struct mthca_tavor_ud_seg *) wqe)->av_addr =
1526 cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
1527 ((struct mthca_tavor_ud_seg *) wqe)->dqpn =
1528 cpu_to_be32(wr->wr.ud.remote_qpn);
1529 ((struct mthca_tavor_ud_seg *) wqe)->qkey =
1530 cpu_to_be32(wr->wr.ud.remote_qkey);
1532 wqe += sizeof (struct mthca_tavor_ud_seg);
1533 size += sizeof (struct mthca_tavor_ud_seg) / 16;
1537 err = build_mlx_header(dev, to_msqp(qp), ind, wr,
1538 wqe - sizeof (struct mthca_next_seg),
1544 wqe += sizeof (struct mthca_data_seg);
1545 size += sizeof (struct mthca_data_seg) / 16;
1549 if (wr->num_sge > qp->sq.max_gs) {
1550 mthca_err(dev, "too many gathers\n");
1556 for (i = 0; i < wr->num_sge; ++i) {
1557 ((struct mthca_data_seg *) wqe)->byte_count =
1558 cpu_to_be32(wr->sg_list[i].length);
1559 ((struct mthca_data_seg *) wqe)->lkey =
1560 cpu_to_be32(wr->sg_list[i].lkey);
1561 ((struct mthca_data_seg *) wqe)->addr =
1562 cpu_to_be64(wr->sg_list[i].addr);
1563 wqe += sizeof (struct mthca_data_seg);
1564 size += sizeof (struct mthca_data_seg) / 16;
1567 /* Add one more inline data segment for ICRC */
1568 if (qp->transport == MLX) {
1569 ((struct mthca_data_seg *) wqe)->byte_count =
1570 cpu_to_be32((1 << 31) | 4);
1571 ((u32 *) wqe)[1] = 0;
1572 wqe += sizeof (struct mthca_data_seg);
1573 size += sizeof (struct mthca_data_seg) / 16;
1576 qp->wrid[ind + qp->rq.max] = wr->wr_id;
1578 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
1579 mthca_err(dev, "opcode invalid\n");
1586 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1587 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1588 qp->send_wqe_offset) |
1589 mthca_opcode[wr->opcode]);
1591 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1592 cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
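/*
 * Patch the previous WQE's control segment so it points at the WQE
 * just built: nda_op gets the new WQE's offset and opcode, ee_nds its
 * size in 16-byte units (plus the DBD bit when needed).  This is what
 * chains the descriptors together for the hardware.
 */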
1597 op0 = mthca_opcode[wr->opcode];
1601 if (unlikely(ind >= qp->sq.max))
1609 doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
1610 qp->send_wqe_offset) | f0 | op0);
1611 doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
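/*
 * The Tavor send doorbell describes the first WQE of this batch:
 * word 0 is its byte offset or'ed with the f0/op0 control and opcode
 * bits, word 1 is (qpn << 8) | its size in 16-byte units.
 */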
1615 mthca_write64(doorbell,
1616 dev->kar + MTHCA_SEND_DOORBELL,
1617 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1620 qp->sq.next_ind = ind;
1621 qp->sq.head += nreq;
1623 spin_unlock_irqrestore(&qp->sq.lock, flags);
1627 int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1628 struct ib_recv_wr **bad_wr)
1630 struct mthca_dev *dev = to_mdev(ibqp->device);
1631 struct mthca_qp *qp = to_mqp(ibqp);
1632 unsigned long flags;
1642 spin_lock_irqsave(&qp->rq.lock, flags);
1644 /* XXX check that state is OK to post receive */
1646 ind = qp->rq.next_ind;
1648 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1649 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1650 mthca_err(dev, "RQ %06x full (%u head, %u tail,"
1651 " %d max, %d nreq)\n", qp->qpn,
1652 qp->rq.head, qp->rq.tail,
1659 wqe = get_recv_wqe(qp, ind);
1660 prev_wqe = qp->rq.last;
1663 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1664 ((struct mthca_next_seg *) wqe)->ee_nds =
1665 cpu_to_be32(MTHCA_NEXT_DBD);
1666 ((struct mthca_next_seg *) wqe)->flags = 0;
1668 wqe += sizeof (struct mthca_next_seg);
1669 size = sizeof (struct mthca_next_seg) / 16;
1671 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1677 for (i = 0; i < wr->num_sge; ++i) {
1678 ((struct mthca_data_seg *) wqe)->byte_count =
1679 cpu_to_be32(wr->sg_list[i].length);
1680 ((struct mthca_data_seg *) wqe)->lkey =
1681 cpu_to_be32(wr->sg_list[i].lkey);
1682 ((struct mthca_data_seg *) wqe)->addr =
1683 cpu_to_be64(wr->sg_list[i].addr);
1684 wqe += sizeof (struct mthca_data_seg);
1685 size += sizeof (struct mthca_data_seg) / 16;
1688 qp->wrid[ind] = wr->wr_id;
1690 if (likely(prev_wqe)) {
1691 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1692 cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
1694 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1695 cpu_to_be32(MTHCA_NEXT_DBD | size);
1702 if (unlikely(ind >= qp->rq.max))
1710 doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
1711 doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
1715 mthca_write64(doorbell,
1716 dev->kar + MTHCA_RECEIVE_DOORBELL,
1717 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1720 qp->rq.next_ind = ind;
1721 qp->rq.head += nreq;
1723 spin_unlock_irqrestore(&qp->rq.lock, flags);
1727 int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1728 struct ib_send_wr **bad_wr)
1730 struct mthca_dev *dev = to_mdev(ibqp->device);
1731 struct mthca_qp *qp = to_mqp(ibqp);
1734 unsigned long flags;
1744 spin_lock_irqsave(&qp->sq.lock, flags);
1746 /* XXX check that state is OK to post send */
1748 ind = qp->sq.head & (qp->sq.max - 1);
1750 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1751 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1752 mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1753 " %d max, %d nreq)\n", qp->qpn,
1754 qp->sq.head, qp->sq.tail,
1761 wqe = get_send_wqe(qp, ind);
1762 prev_wqe = qp->sq.last;
1765 ((struct mthca_next_seg *) wqe)->flags =
1766 ((wr->send_flags & IB_SEND_SIGNALED) ?
1767 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1768 ((wr->send_flags & IB_SEND_SOLICITED) ?
1769 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
1771 if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1772 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1773 ((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
1775 wqe += sizeof (struct mthca_next_seg);
1776 size = sizeof (struct mthca_next_seg) / 16;
1778 switch (qp->transport) {
1780 switch (wr->opcode) {
1781 case IB_WR_ATOMIC_CMP_AND_SWP:
1782 case IB_WR_ATOMIC_FETCH_AND_ADD:
1783 ((struct mthca_raddr_seg *) wqe)->raddr =
1784 cpu_to_be64(wr->wr.atomic.remote_addr);
1785 ((struct mthca_raddr_seg *) wqe)->rkey =
1786 cpu_to_be32(wr->wr.atomic.rkey);
1787 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1789 wqe += sizeof (struct mthca_raddr_seg);
1791 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1792 ((struct mthca_atomic_seg *) wqe)->swap_add =
1793 cpu_to_be64(wr->wr.atomic.swap);
1794 ((struct mthca_atomic_seg *) wqe)->compare =
1795 cpu_to_be64(wr->wr.atomic.compare_add);
1797 ((struct mthca_atomic_seg *) wqe)->swap_add =
1798 cpu_to_be64(wr->wr.atomic.compare_add);
1799 ((struct mthca_atomic_seg *) wqe)->compare = 0;
1802 wqe += sizeof (struct mthca_atomic_seg);
1803 size += sizeof (struct mthca_raddr_seg) / 16 +
1804 sizeof (struct mthca_atomic_seg);
1807 case IB_WR_RDMA_READ:
1808 case IB_WR_RDMA_WRITE:
1809 case IB_WR_RDMA_WRITE_WITH_IMM:
1810 ((struct mthca_raddr_seg *) wqe)->raddr =
1811 cpu_to_be64(wr->wr.rdma.remote_addr);
1812 ((struct mthca_raddr_seg *) wqe)->rkey =
1813 cpu_to_be32(wr->wr.rdma.rkey);
1814 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1815 wqe += sizeof (struct mthca_raddr_seg);
1816 size += sizeof (struct mthca_raddr_seg) / 16;
1820 /* No extra segments required for sends */
1827 switch (wr->opcode) {
1828 case IB_WR_RDMA_WRITE:
1829 case IB_WR_RDMA_WRITE_WITH_IMM:
1830 ((struct mthca_raddr_seg *) wqe)->raddr =
1831 cpu_to_be64(wr->wr.rdma.remote_addr);
1832 ((struct mthca_raddr_seg *) wqe)->rkey =
1833 cpu_to_be32(wr->wr.rdma.rkey);
1834 ((struct mthca_raddr_seg *) wqe)->reserved = 0;
1835 wqe += sizeof (struct mthca_raddr_seg);
1836 size += sizeof (struct mthca_raddr_seg) / 16;
1840 /* No extra segments required for sends */
1847 memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
1848 to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
1849 ((struct mthca_arbel_ud_seg *) wqe)->dqpn =
1850 cpu_to_be32(wr->wr.ud.remote_qpn);
1851 ((struct mthca_arbel_ud_seg *) wqe)->qkey =
1852 cpu_to_be32(wr->wr.ud.remote_qkey);
1854 wqe += sizeof (struct mthca_arbel_ud_seg);
1855 size += sizeof (struct mthca_arbel_ud_seg) / 16;
1859 err = build_mlx_header(dev, to_msqp(qp), ind, wr,
1860 wqe - sizeof (struct mthca_next_seg),
1866 wqe += sizeof (struct mthca_data_seg);
1867 size += sizeof (struct mthca_data_seg) / 16;
1871 if (wr->num_sge > qp->sq.max_gs) {
1872 mthca_err(dev, "too many gathers\n");
1878 for (i = 0; i < wr->num_sge; ++i) {
1879 ((struct mthca_data_seg *) wqe)->byte_count =
1880 cpu_to_be32(wr->sg_list[i].length);
1881 ((struct mthca_data_seg *) wqe)->lkey =
1882 cpu_to_be32(wr->sg_list[i].lkey);
1883 ((struct mthca_data_seg *) wqe)->addr =
1884 cpu_to_be64(wr->sg_list[i].addr);
1885 wqe += sizeof (struct mthca_data_seg);
1886 size += sizeof (struct mthca_data_seg) / 16;
1889 /* Add one more inline data segment for ICRC */
1890 if (qp->transport == MLX) {
1891 ((struct mthca_data_seg *) wqe)->byte_count =
1892 cpu_to_be32((1 << 31) | 4);
1893 ((u32 *) wqe)[1] = 0;
1894 wqe += sizeof (struct mthca_data_seg);
1895 size += sizeof (struct mthca_data_seg) / 16;
1898 qp->wrid[ind + qp->rq.max] = wr->wr_id;
1900 if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
1901 mthca_err(dev, "opcode invalid\n");
1907 if (likely(prev_wqe)) {
1908 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1909 cpu_to_be32(((ind << qp->sq.wqe_shift) +
1910 qp->send_wqe_offset) |
1911 mthca_opcode[wr->opcode]);
1913 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1914 cpu_to_be32(MTHCA_NEXT_DBD | size);
1919 op0 = mthca_opcode[wr->opcode];
1923 if (unlikely(ind >= qp->sq.max))
1931 doorbell[0] = cpu_to_be32((nreq << 24) |
1932 ((qp->sq.head & 0xffff) << 8) |
1934 doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
1936 qp->sq.head += nreq;
1939 * Make sure that descriptors are written before the doorbell record is updated.
1943 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
1946 * Make sure doorbell record is written before we
1947 * write MMIO send doorbell.
1950 mthca_write64(doorbell,
1951 dev->kar + MTHCA_SEND_DOORBELL,
1952 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1955 spin_unlock_irqrestore(&qp->sq.lock, flags);
1959 int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1960 struct ib_recv_wr **bad_wr)
1962 struct mthca_dev *dev = to_mdev(ibqp->device);
1963 struct mthca_qp *qp = to_mqp(ibqp);
1964 unsigned long flags;
1971 spin_lock_irqsave(&qp->rq.lock, flags);
1973 /* XXX check that state is OK to post receive */
1975 ind = qp->rq.head & (qp->rq.max - 1);
1977 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1978 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1979 mthca_err(dev, "RQ %06x full (%u head, %u tail,"
1980 " %d max, %d nreq)\n", qp->qpn,
1981 qp->rq.head, qp->rq.tail,
1988 wqe = get_recv_wqe(qp, ind);
1990 ((struct mthca_next_seg *) wqe)->flags = 0;
1992 wqe += sizeof (struct mthca_next_seg);
1994 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2000 for (i = 0; i < wr->num_sge; ++i) {
2001 ((struct mthca_data_seg *) wqe)->byte_count =
2002 cpu_to_be32(wr->sg_list[i].length);
2003 ((struct mthca_data_seg *) wqe)->lkey =
2004 cpu_to_be32(wr->sg_list[i].lkey);
2005 ((struct mthca_data_seg *) wqe)->addr =
2006 cpu_to_be64(wr->sg_list[i].addr);
2007 wqe += sizeof (struct mthca_data_seg);
2010 if (i < qp->rq.max_gs) {
2011 ((struct mthca_data_seg *) wqe)->byte_count = 0;
2012 ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
2013 ((struct mthca_data_seg *) wqe)->addr = 0;
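/*
 * If the WR used fewer scatter entries than rq.max_gs, terminate the
 * list with a zero-length segment carrying MTHCA_INVAL_LKEY so the
 * HCA stops scattering there.
 */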
2016 qp->wrid[ind] = wr->wr_id;
2019 if (unlikely(ind >= qp->rq.max))
2024 qp->rq.head += nreq;
2027 * Make sure that descriptors are written before the doorbell record is updated.
2031 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
2034 spin_unlock_irqrestore(&qp->rq.lock, flags);
2038 int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2039 int index, int *dbd, __be32 *new_wqe)
2041 struct mthca_next_seg *next;
2044 * For SRQs, all WQEs generate a CQE, so we're always at the
2045 * end of the doorbell chain.
2053 next = get_send_wqe(qp, index);
2055 next = get_recv_wqe(qp, index);
2057 *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2058 if (next->ee_nds & cpu_to_be32(0x3f))
2059 *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
2060 (next->ee_nds & cpu_to_be32(0x3f));
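/*
 * Report whether the failed WQE had its DBD bit set and, when the size
 * field (low 6 bits of ee_nds) is non-zero, hand back a patched
 * next-WQE word so completion processing can continue down the chain.
 */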
2067 int __devinit mthca_init_qp_table(struct mthca_dev *dev)
2073 spin_lock_init(&dev->qp_table.lock);
2076 * We reserve 2 extra QPs per port for the special QPs. The
2077 * special QP for port 1 has to be even, so round up.
2079 dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
2080 err = mthca_alloc_init(&dev->qp_table.alloc,
2081 dev->limits.num_qps,
2083 dev->qp_table.sqp_start +
2084 MTHCA_MAX_PORTS * 2);
2088 err = mthca_array_init(&dev->qp_table.qp,
2089 dev->limits.num_qps);
2091 mthca_alloc_cleanup(&dev->qp_table.alloc);
2095 for (i = 0; i < 2; ++i) {
2096 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
2097 dev->qp_table.sqp_start + i * 2,
2102 mthca_warn(dev, "CONF_SPECIAL_QP returned "
2103 "status %02x, aborting.\n",
2112 for (i = 0; i < 2; ++i)
2113 mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
2115 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2116 mthca_alloc_cleanup(&dev->qp_table.alloc);
2121 void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
2126 for (i = 0; i < 2; ++i)
2127 mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
2129 mthca_alloc_cleanup(&dev->qp_table.alloc);