/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x

static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

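	/*
	 * PSNs are 24-bit values, so the subtraction below wraps via
	 * IPATH_PSN_MASK.  Each packet already sent carried pmtu bytes,
	 * so the packet count translates directly into a byte offset to
	 * skip; e.g. (illustrative numbers) restarting 3 packets into a
	 * request with a 2048-byte MTU skips 6144 bytes.
	 */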
	len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
	ss->sge = wqe->sg_list[0];
	ss->sg_list = wqe->sg_list + 1;
	ss->num_sge = wqe->wr.num_sge;
	ipath_skip_sge(ss, len);
	return wqe->length - len;
}

/**
 * ipath_init_restart - initialize the qp->s_sge after a restart
 * @qp: the QP whose SGE we're restarting
 * @wqe: the work queue entry to initialize the QP's SGE from
 *
 * The QP s_lock should be held and interrupts disabled.
 */
static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
{
	struct ipath_ibdev *dev;

	qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
				ib_mtu_enum_to_int(qp->path_mtu));
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (list_empty(&qp->timerwait))
		list_add_tail(&qp->timerwait,
			      &dev->pending[dev->pending_index]);
	spin_unlock(&dev->pending_lock);
}

/**
 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are on the responder side of the QP context.
 * Note the QP s_lock must be held.
 */
static int ipath_make_rc_ack(struct ipath_qp *qp,
			     struct ipath_other_headers *ohdr,
			     u32 pmtu, u32 *bth0p, u32 *bth2p)
{
	struct ipath_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

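	/*
	 * s_ack_queue[] is a small ring of RDMA read/atomic requests that
	 * are still owed a response: the receive side fills entries at
	 * r_head_ack_queue and this function drains them starting at
	 * s_tail_ack_queue, resuming whatever response was in progress.
	 */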
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
	case OP(ATOMIC_ACKNOWLEDGE):
		qp->s_ack_state = OP(ACKNOWLEDGE);
		/* FALLTHROUGH */
	case OP(ACKNOWLEDGE):
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & IPATH_S_ACK_PENDING)
				goto normal;
			goto bail;
		}
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/* Copy SGE state in case we need to resend */
			qp->s_ack_rdma_sge = e->rdma_sge;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			len = e->rdma_sge.sge.sge_length;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
				if (++qp->s_tail_ack_queue >
				    IPATH_MAX_RDMA_ATOMIC)
					qp->s_tail_ack_queue = 0;
			}
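			/*
			 * ipath_compute_aeth() packs the current MSN into
			 * the low 24 bits of the AETH and a flow-control
			 * credit code into the high byte, per the IB AETH
			 * layout.
			 */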
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
		} else {
			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			len = 0;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = ipath_compute_aeth(qp);
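			/*
			 * The 64-bit atomic result goes out as two
			 * big-endian 32-bit words, presumably because the
			 * header struct only guarantees 4-byte alignment
			 * at this offset.
			 */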
			ohdr->u.at.atomic_ack_eth[0] =
				cpu_to_be32(e->atomic_data >> 32);
			ohdr->u.at.atomic_ack_eth[1] =
				cpu_to_be32(e->atomic_data);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn;
			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
				qp->s_tail_ack_queue = 0;
		}
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
				qp->s_tail_ack_queue = 0;
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
		break;

	default:
	normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
		qp->s_flags &= ~IPATH_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					    (qp->s_nak_state <<
					     IPATH_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
		len = 0;
		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	*bth0p = bth0;
	*bth2p = bth2;
	return 1;

bail:
	return 0;
}

/**
 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 * @bth0p: pointer to the BTH opcode word
 * @bth2p: pointer to the BTH PSN word
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held and interrupts disabled.
 */
int ipath_make_rc_req(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu, u32 *bth0p, u32 *bth2p)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_sge_state *ss;
	struct ipath_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	char newreq;

	/* Sending responses takes priority over sending requests. */
	if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
	     (qp->s_flags & IPATH_S_ACK_PENDING) ||
	     qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE) &&
	    ipath_make_rc_ack(qp, ohdr, pmtu, bth0p, bth2p))
		goto done;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout || qp->s_wait_credit)
		goto bail;

	/* Limit the number of packets sent without an ACK. */
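	/*
	 * ipath_cmp24() compares two 24-bit PSNs circularly, returning
	 * negative, zero or positive like memcmp; here it asks whether
	 * s_psn has run more than IPATH_PSN_CREDIT packets ahead of the
	 * last PSN the remote end acknowledged.
	 */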
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
		qp->s_wait_credit = 1;
		dev->n_rc_stalls++;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		goto bail;
	}

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= IPATH_S_FENCE_PENDING;
				goto bail;
			}
			wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		len = wqe->length;
		ss = &qp->s_sge;
		bth2 = 0;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto bail;
			wqe->lpsn = wqe->psn;
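			/*
			 * lpsn is the PSN of the last packet of this
			 * request: ceil(len / pmtu) packets in total, so
			 * e.g. a 10000-byte send over a 4096-byte MTU
			 * takes three packets and lpsn = psn + 2.
			 */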
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
			if (newreq && qp->s_lsn != (u32) -1)
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto bail;
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= IPATH_S_RDMAR_PENDING;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= IPATH_S_RDMAR_PENDING;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (qp->s_lsn != (u32) -1)
					qp->s_lsn++;
				wqe->lpsn = wqe->psn;
			}
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->wr.wr.atomic.swap);
				ohdr->u.atomic_eth.compare_data = cpu_to_be64(
					wqe->wr.wr.atomic.compare_add);
			} else {
				qp->s_state = OP(FETCH_ADD);
				ohdr->u.atomic_eth.swap_data = cpu_to_be64(
					wqe->wr.wr.atomic.compare_add);
				ohdr->u.atomic_eth.compare_data = 0;
			}
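			/*
			 * The remote address goes out as two 32-bit words,
			 * presumably because the atomic ETH in the header
			 * is only guaranteed 4-byte alignment.
			 */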
			ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
				wqe->wr.wr.atomic.remote_addr >> 32);
			ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
				wqe->wr.wr.atomic.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->wr.wr.atomic.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		bth2 |= qp->s_psn & IPATH_PSN_MASK;
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else {
			qp->s_psn++;
			if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
				qp->s_next_psn = qp->s_psn;
		}
		/*
		 * Put the QP on the pending list so lost ACKs will cause
		 * a retry. More than one request can be pending so the
		 * QP may already be on the dev->pending list.
		 */
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * This case can only happen if a send is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * This case can only happen if an RDMA write is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * This case can only happen if an RDMA read is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->wr.wr.rdma.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = qp->s_psn++ & IPATH_PSN_MASK;
		if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
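	/*
	 * Ask the responder for an ACK shortly before the unacked window
	 * of IPATH_PSN_CREDIT packets is exhausted, so s_last_psn keeps
	 * advancing and the sender is not forced to stall.
	 */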
	if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
		bth2 |= 1 << 31;	/* Request ACK. */

	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	*bth0p = bth0 | (qp->s_state << 24);
	*bth2p = bth2;
done:
	return 1;

bail:
	return 0;
}

/**
 * send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from ipath_rc_rcv() and only uses the receive
 * side QP state.
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
static void send_rc_ack(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	unsigned long flags;
	struct ipath_ib_header hdr;
	struct ipath_other_headers *ohdr;

	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
	if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
		goto queue_ack;

	/* Construct the header. */
	ohdr = &hdr.u.oth;
	lrh0 = IPATH_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
					 &qp->remote_ah_attr.grh,
					 hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = IPATH_LRH_GRH;
	}
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
		OP(ACKNOWLEDGE) << 24;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
					    (qp->r_nak_state <<
					     IPATH_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = ipath_compute_aeth(qp);
	lrh0 |= qp->remote_ah_attr.sl << 4;
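	/*
	 * The LRH packet length (lrh[2]) is counted in 32-bit words and
	 * includes the ICRC, which is what the SIZE_OF_CRC term covers.
	 */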
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);

	/*
	 * If we can send the ACK, clear the ACK state.
	 */
	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
		dev->n_unicast_xmit++;
		goto done;
	}

	/*
	 * We are out of PIO buffers at the moment.
	 * Pass responsibility for sending the ACK to the
	 * send tasklet so that when a PIO buffer becomes
	 * available, the ACK is sent ahead of other outgoing
	 * packets.
	 */
	dev->n_rc_qacks++;

queue_ack:
	spin_lock_irqsave(&qp->s_lock, flags);
	qp->s_flags |= IPATH_S_ACK_PENDING;
	qp->s_nak_state = qp->r_nak_state;
	qp->s_ack_psn = qp->r_ack_psn;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Call ipath_do_rc_send() in another thread. */
	tasklet_hi_schedule(&qp->s_task);

done:
	return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from ipath_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct ipath_qp *qp, u32 psn)
{
	u32 n = qp->s_last;
	struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
	u32 opcode;

	qp->s_cur = n;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (ipath_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}

	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = get_swqe_ptr(qp, n);
		diff = ipath_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}

	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See ipath_do_rc_send().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since atomic requests
		 * always restart from the beginning.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
}

/**
 * ipath_restart_rc - back up requester to resend the last un-ACKed request
 * @qp: the QP to restart
 * @psn: packet sequence number for the request
 * @wc: the work completion request
 *
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
	struct ipath_ibdev *dev;

	if (qp->s_retry == 0) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_RETRY_EXC_ERR;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->qp = &qp->ibqp;
		wc->src_qp = qp->remote_qpn;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_sqerror_qp(qp, wc);
		goto bail;
	}
	qp->s_retry--;

	/*
	 * Remove the QP from the timeout queue.
	 * Note: it may already have been removed by ipath_ib_timer().
	 */
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		dev->n_rc_resends++;
	else
		dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;

	reset_psn(qp, psn);
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}

static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
{
	if (qp->s_wait_credit) {
		qp->s_wait_credit = 0;
		tasklet_hi_schedule(&qp->s_task);
	}
	qp->s_last_psn = psn;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held and interrupts disabled.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	struct ipath_swqe *wqe;
	int ret = 0;
	u32 ack_psn;

	/*
	 * Remove the QP from the timeout queue (or RNR timeout queue).
	 * If ipath_ib_timer() has already removed it,
	 * it's OK since we hold the QP s_lock and ipath_restart_rc()
	 * just won't find anything to restart if we ACK everything.
	 */
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request. The MSN won't include the NAK'ed
	 * request but will include any ACK'ed requests.
	 */
	ack_psn = psn;
	if (aeth >> 29)
		ack_psn--;
	wqe = get_swqe_ptr(qp, qp->s_last);

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while (ipath_cmp24(ack_psn, wqe->lpsn) >= 0) {
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic. In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops. Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) ||
		      ipath_cmp24(ack_psn, wqe->lpsn) != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
		      ipath_cmp24(wqe->psn, psn) != 0))) {
			/*
			 * The last valid PSN seen is the previous
			 * request's.
			 */
			update_last_psn(qp, wqe->psn - 1);
			/* Retry this request. */
			ipath_restart_rc(qp, wqe->psn, &wc);
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & IPATH_S_FENCE_PENDING) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~IPATH_S_FENCE_PENDING;
				tasklet_hi_schedule(&qp->s_task);
			} else if (qp->s_flags & IPATH_S_RDMAR_PENDING) {
				qp->s_flags &= ~IPATH_S_RDMAR_PENDING;
				tasklet_hi_schedule(&qp->s_task);
			}
		}
		/* Post a send completion queue entry if requested. */
		if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = wqe->length;
			wc.qp = &qp->ibqp;
			wc.src_qp = qp->remote_qpn;
			wc.wc_flags = 0;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
		}
		qp->s_retry = qp->s_retry_cnt;
		/*
		 * If we are completing a request which is in the process of
		 * being resent, we can stop resending it since we know the
		 * responder has already seen it.
		 */
		if (qp->s_last == qp->s_cur) {
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			qp->s_last = qp->s_cur;
			if (qp->s_last == qp->s_tail)
				break;
			wqe = get_swqe_ptr(qp, qp->s_cur);
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		} else {
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			if (qp->s_last == qp->s_tail)
				break;
			wqe = get_swqe_ptr(qp, qp->s_last);
		}
	}

	switch (aeth >> 29) {
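	/*
	 * The top three bits of the AETH select the acknowledge class:
	 * 0 is an ACK (low bits carry a credit count), 1 is an RNR NAK,
	 * 3 is a NAK with an error syndrome, and 2 is reserved.
	 */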
	case 0:		/* ACK */
		dev->n_rc_acks++;
		/* If this is a partial ACK, reset the retransmit timer. */
		if (qp->s_last != qp->s_tail) {
			spin_lock(&dev->pending_lock);
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
			spin_unlock(&dev->pending_lock);
			/*
			 * If we get a partial ACK for a resent operation,
			 * we can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (ipath_cmp24(qp->s_psn, psn) <= 0) {
				reset_psn(qp, psn + 1);
				tasklet_hi_schedule(&qp->s_task);
			}
		} else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = psn + 1;
		}
		ipath_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		ret = 1;
		goto bail;

	case 1:		/* RNR NAK */
		dev->n_rnr_naks++;
		if (qp->s_last == qp->s_tail)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			wc.status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			dev->n_rc_resends++;
		else
			dev->n_rc_resends +=
				(qp->s_psn - psn) & IPATH_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_rnr_timeout =
			ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
					   IPATH_AETH_CREDIT_MASK];
		ipath_insert_rnr_queue(qp);
		goto bail;

	case 3:		/* NAK */
		if (qp->s_last == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
			IPATH_AETH_CREDIT_MASK) {
		case 0:	/* PSN sequence error */
			dev->n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			ipath_restart_rc(qp, psn, &wc);
			break;

		case 1:	/* Invalid Request */
			wc.status = IB_WC_REM_INV_REQ_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 2:	/* Remote Access Error */
			wc.status = IB_WC_REM_ACCESS_ERR;
			dev->n_other_naks++;
			goto class_b;

		case 3:	/* Remote Operation Error */
			wc.status = IB_WC_REM_OP_ERR;
			dev->n_other_naks++;
		class_b:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp = &qp->ibqp;
			wc.src_qp = qp->remote_qpn;
			wc.wc_flags = 0;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(qp, &wc);
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:		/* 2: reserved */
	reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	return ret;
}

/**
 * ipath_rc_rcv_resp - process an incoming RC response packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data, u32 tlen,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn, u32 hdrsize, u32 pmtu,
				     int header_in_data)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	struct ib_wc wc;
	int diff;
	u32 pad;
	u32 aeth;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses. */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = ipath_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			if (!header_in_data)
				aeth = be32_to_cpu(ohdr->u.aeth);
			else {
				aeth = be32_to_cpu(((__be32 *) data)[0]);
				data += sizeof(__be32);
			}
			if ((aeth >> 29) == 0)
				ipath_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	if (unlikely(qp->s_last == qp->s_tail))
		goto ack_done;
	wqe = get_swqe_ptr(qp, qp->s_last);

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
			u64 val;

			if (!header_in_data) {
				__be32 *p = ohdr->u.at.atomic_ack_eth;

				val = ((u64) be32_to_cpu(p[0]) << 32) |
					be32_to_cpu(p[1]);
			} else
				val = be64_to_cpu(((__be64 *) data)[0]);
			*(u64 *) wqe->sg_list[0].vaddr = val;
		}
		if (!do_rc_ack(qp, aeth, psn, opcode) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
	read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/* We got a response so update the timeout. */
		spin_lock(&dev->pending_lock);
		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
			list_move_tail(&qp->timerwait,
				       &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
		goto bail;

	case OP(RDMA_READ_RESPONSE_ONLY):
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;

	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			dev->n_rdma_seq++;
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
	read_last:
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
		(void) do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST));
		goto ack_done;
	}

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	goto bail;

ack_op_err:
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;

ack_len_err:
	wc.status = IB_WC_LOC_LEN_ERR;
ack_err:
	wc.wr_id = wqe->wr.wr_id;
	wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	ipath_sqerror_qp(qp, &wc);
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}

/**
 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn,
				     int diff,
				     int header_in_data)
{
	struct ipath_ack_entry *e;
	u8 i, prev;
	int old_req;
	unsigned long flags;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		goto done;
	}

	/*
	 * Handle a duplicate request. Don't re-execute SEND, RDMA
	 * write or atomic op. Don't NAK errors, just silently drop
	 * the duplicate request. Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK. We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 */
	psn &= IPATH_PSN_MASK;
	e = NULL;
	old_req = 1;
	spin_lock_irqsave(&qp->s_lock, flags);
	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = IPATH_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (!e->opcode) {
			e = NULL;
			break;
		}
		if (ipath_cmp24(psn, e->psn) >= 0)
			break;
	}
	switch (e ? e->opcode : 0) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;
		u32 offset;
		u32 len;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
			goto unlock_done;
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & IPATH_PSN_MASK) *
			ib_mtu_enum_to_int(qp->path_mtu);
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
			goto unlock_done;
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = ipath_rkey_ok(qp, &e->rdma_sge,
					   len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.sg_list = NULL;
			e->rdma_sge.num_sge = 0;
			e->rdma_sge.sge.mr = NULL;
			e->rdma_sge.sge.vaddr = NULL;
			e->rdma_sge.sge.length = 0;
			e->rdma_sge.sge.sge_length = 0;
		}
		e->psn = psn;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = prev;
		break;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = prev;
		break;
	}

	default:
		if (old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_ack_state = OP(ACKNOWLEDGE);
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->r_nak_state = 0;
	spin_unlock_irq(&qp->s_lock);
	tasklet_hi_schedule(&qp->s_task);
	goto done;

unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
done:
	return 1;

send_ack:
	return 0;
}

static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	ipath_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * ipath_rc_rcv - process an incoming RC packet
 * @dev: the device this packet came in on
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int diff;
	struct ib_reth *reth;
	int header_in_data;

	/* Validate the SLID. See Ch. 9.6.1.5 */
	if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
		goto done;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 4
		 * bytes of the BTH header (PSN) are in the data buffer.
		 */
		header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}

	/*
	 * Process responses (ACKs) before anything else. Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
				  hdrsize, pmtu, header_in_data);
		goto done;
	}

	/* Compute 24 bits worth of difference. */
	diff = ipath_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
				       psn, diff, header_in_data))
			goto done;
		goto send_ack;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
	nack_inv:
		ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR);
		qp->r_nak_state = IB_NAK_INVALID_REQUEST;
		qp->r_ack_psn = qp->r_psn;
		goto send_ack;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}

	wc.imm_data = 0;
	wc.wc_flags = 0;

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/*
			 * An RNR NAK will ACK earlier sends and RDMA writes.
			 * Don't queue the NAK if an RDMA read or atomic
			 * is pending though.
			 */
			if (qp->r_nak_state)
				goto done;
			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
			qp->r_ack_psn = qp->r_psn;
			goto send_ack;
		}
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
	send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		/* FALLTHROUGH */
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, tlen);
		qp->r_msn++;
		if (!qp->r_wrid_valid)
			break;
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &qp->r_sge,
					   qp->r_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
		} else {
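			/*
			 * Zero-length write: leave r_sge pointing at
			 * nothing so a stray copy cannot touch memory;
			 * the dummy SGE is never dereferenced.
			 */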
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE)))
			goto nack_acc;
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto send_last;
		/* consume RWQE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(RDMA_READ_REQUEST): {
		struct ipath_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_acc;
		next = qp->r_head_ack_queue + 1;
		if (next > IPATH_MAX_RDMA_ATOMIC)
			next = 0;
		if (unlikely(next == qp->s_tail_ack_queue))
			goto nack_inv;
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					   rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc;
			/*
			 * Update the next expected PSN. We add 1 later
			 * below, so only add the remainder here.
			 */
			if (len > pmtu)
				qp->r_psn += (len - 1) / pmtu;
		} else {
			e->rdma_sge.sg_list = NULL;
			e->rdma_sge.num_sge = 0;
			e->rdma_sge.sge.mr = NULL;
			e->rdma_sge.sge.vaddr = NULL;
			e->rdma_sge.sge.length = 0;
			e->rdma_sge.sge.sge_length = 0;
		}
		e->opcode = (u8) opcode;
		e->psn = psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		barrier();
		qp->r_head_ack_queue = next;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;
	}

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct ipath_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		next = qp->r_head_ack_queue + 1;
		if (next > IPATH_MAX_RDMA_ATOMIC)
			next = 0;
		if (unlikely(next == qp->s_tail_ack_queue))
			goto nack_inv;
		if (!header_in_data)
			ateth = &ohdr->u.atomic_eth;
		else
			ateth = (struct ib_atomic_eth *)data;
		vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
			be32_to_cpu(ateth->vaddr[1]);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
					    sizeof(u64), vaddr, rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		/* Perform atomic OP and save result. */
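		/*
		 * Both OPs must return the *old* value to the requester:
		 * atomic64_add_return() yields the new value, so subtract
		 * sdata to recover the old one; cmpxchg() already returns
		 * the prior contents whether or not the swap happened.
		 */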
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = be64_to_cpu(ateth->swap_data);
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      be64_to_cpu(ateth->compare_data),
				      sdata);
		e->opcode = opcode;
		e->psn = psn & IPATH_PSN_MASK;
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		barrier();
		qp->r_head_ack_queue = next;

		/* Call ipath_do_rc_send() in another thread. */
		tasklet_hi_schedule(&qp->s_task);

		goto done;
	}

	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31))
		goto send_ack;
	goto done;

nack_acc:
	ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;

send_ack:
	send_rc_ack(qp);

done:
	return;
}