2 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include "ipath_verbs.h"
37 #include "ipath_kernel.h"
39 /* cut down ridiculously long IB macro names */
40 #define OP(x) IB_OPCODE_RC_##x
42 static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
47 len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
48 ss->sge = wqe->sg_list[0];
49 ss->sg_list = wqe->sg_list + 1;
50 ss->num_sge = wqe->wr.num_sge;
51 ipath_skip_sge(ss, len);
52 return wqe->length - len;
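/*
 * A hypothetical worked example of the arithmetic above: if wqe->psn is
 * 0x10, the restart psn is 0x13 and pmtu is 2048, then len is
 * 3 * 2048 = 6144, so three packets' worth of payload are skipped and the
 * SGE state points at the first byte the responder has not yet
 * acknowledged.  Masking with IPATH_PSN_MASK keeps the subtraction
 * correct across 24-bit PSN wrap-around.
 */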
56 * ipath_init_restart - initialize the qp->s_sge after a restart
57 * @qp: the QP whose SGE we're restarting
58 * @wqe: the work queue to initialize the QP's SGE from
60 * The QP s_lock should be held and interrupts disabled.
62 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
64 struct ipath_ibdev *dev;
66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
67 ib_mtu_enum_to_int(qp->path_mtu));
68 dev = to_idev(qp->ibqp.device);
69 spin_lock(&dev->pending_lock);
70 if (list_empty(&qp->timerwait))
71 list_add_tail(&qp->timerwait,
72 &dev->pending[dev->pending_index]);
73 spin_unlock(&dev->pending_lock);
77 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
78 * @qp: a pointer to the QP
79 * @ohdr: a pointer to the IB header being constructed
82 * Return 1 if constructed; otherwise, return 0.
83 * Note that we are on the responder side of the QP context.
84 * Note the QP s_lock must be held.
86 static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
87 struct ipath_other_headers *ohdr, u32 pmtu)
89 struct ipath_ack_entry *e;
95 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
98 switch (qp->s_ack_state) {
99 case OP(RDMA_READ_RESPONSE_LAST):
100 case OP(RDMA_READ_RESPONSE_ONLY):
101 case OP(ATOMIC_ACKNOWLEDGE):
103 * We can increment the tail pointer now that the last
104 * response has been sent instead of only being constructed.
107 if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
108 qp->s_tail_ack_queue = 0;
111 case OP(ACKNOWLEDGE):
112 /* Check for no next entry in the queue. */
113 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
114 if (qp->s_flags & IPATH_S_ACK_PENDING)
116 qp->s_ack_state = OP(ACKNOWLEDGE);
120 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
121 if (e->opcode == OP(RDMA_READ_REQUEST)) {
122 /* Copy SGE state in case we need to resend */
123 qp->s_ack_rdma_sge = e->rdma_sge;
124 qp->s_cur_sge = &qp->s_ack_rdma_sge;
125 len = e->rdma_sge.sge.sge_length;
128 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
130 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
133 ohdr->u.aeth = ipath_compute_aeth(qp);
135 qp->s_ack_rdma_psn = e->psn;
136 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
138 /* COMPARE_SWAP or FETCH_ADD */
139 qp->s_cur_sge = NULL;
141 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
142 ohdr->u.at.aeth = ipath_compute_aeth(qp);
143 ohdr->u.at.atomic_ack_eth[0] =
144 cpu_to_be32(e->atomic_data >> 32);
145 ohdr->u.at.atomic_ack_eth[1] =
146 cpu_to_be32(e->atomic_data);
147 hwords += sizeof(ohdr->u.at) / sizeof(u32);
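/*
 * The 64-bit result of the atomic operation is carried in the ATOMIC
 * ACKNOWLEDGE extended header as two big-endian 32-bit words (high
 * word first), since atomic_ack_eth is declared as a pair of __be32
 * rather than a single __be64.
 */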
151 bth0 = qp->s_ack_state << 24;
154 case OP(RDMA_READ_RESPONSE_FIRST):
155 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
157 case OP(RDMA_READ_RESPONSE_MIDDLE):
158 len = qp->s_ack_rdma_sge.sge.sge_length;
162 ohdr->u.aeth = ipath_compute_aeth(qp);
164 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
165 qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
167 bth0 = qp->s_ack_state << 24;
168 bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
174 * Send a regular ACK.
175 * Set s_ack_state so we wait until this ACK has actually been
176 * sent before going back to the ACKNOWLEDGE state (see above).
179 qp->s_ack_state = OP(SEND_ONLY);
180 qp->s_flags &= ~IPATH_S_ACK_PENDING;
181 qp->s_cur_sge = NULL;
184 cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
186 IPATH_AETH_CREDIT_SHIFT));
188 ohdr->u.aeth = ipath_compute_aeth(qp);
191 bth0 = OP(ACKNOWLEDGE) << 24;
192 bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
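/*
 * A rough reminder of the BTH fields assembled in this file (per the
 * IBTA spec): bth0 holds the opcode in bits 31:24, the solicited event
 * bit in bit 23, the MigReq bit in bit 22 and the P_Key in the low 16
 * bits; bth2 holds the ack-request bit in bit 31 and the 24-bit PSN in
 * bits 23:0.  ipath_make_ruc_header() fills in the remaining words.
 */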
194 qp->s_hdrwords = hwords;
195 qp->s_cur_size = len;
196 ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
204 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
205 * @qp: a pointer to the QP
207 * Return 1 if constructed; otherwise, return 0.
209 int ipath_make_rc_req(struct ipath_qp *qp)
211 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
212 struct ipath_other_headers *ohdr;
213 struct ipath_sge_state *ss;
214 struct ipath_swqe *wqe;
219 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
224 ohdr = &qp->s_hdr.u.oth;
225 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
226 ohdr = &qp->s_hdr.u.l.oth;
229 * The lock is needed to synchronize between the sending tasklet,
230 * the receive interrupt handler, and timeout resends.
232 spin_lock_irqsave(&qp->s_lock, flags);
234 /* Sending responses takes priority over sending requests. */
235 if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
236 (qp->s_flags & IPATH_S_ACK_PENDING) ||
237 qp->s_ack_state != OP(ACKNOWLEDGE)) &&
238 ipath_make_rc_ack(dev, qp, ohdr, pmtu))
241 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
242 qp->s_rnr_timeout || qp->s_wait_credit)
245 /* Limit the number of packets sent without an ACK. */
246 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) {
247 qp->s_wait_credit = 1;
252 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
254 bth0 = 1 << 22; /* Set M bit */
256 /* Send a request. */
257 wqe = get_swqe_ptr(qp, qp->s_cur);
258 switch (qp->s_state) {
261 * Resend an old request or start a new one.
263 * We keep track of the current SWQE so that
264 * we don't reset the "furthest progress" state
265 * if we need to back up.
268 if (qp->s_cur == qp->s_tail) {
269 /* Check if send work queue is empty. */
270 if (qp->s_tail == qp->s_head)
273 * If a fence is requested, wait for previous
274 * RDMA read and atomic operations to finish.
276 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
277 qp->s_num_rd_atomic) {
278 qp->s_flags |= IPATH_S_FENCE_PENDING;
281 wqe->psn = qp->s_next_psn;
285 * Note that we have to be careful not to modify the
286 * original work request since we may need to resend it.
292 switch (wqe->wr.opcode) {
294 case IB_WR_SEND_WITH_IMM:
295 /* If no credit, return. */
296 if (qp->s_lsn != (u32) -1 &&
297 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
299 wqe->lpsn = wqe->psn;
301 wqe->lpsn += (len - 1) / pmtu;
302 qp->s_state = OP(SEND_FIRST);
306 if (wqe->wr.opcode == IB_WR_SEND)
307 qp->s_state = OP(SEND_ONLY);
309 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
310 /* Immediate data comes after the BTH */
311 ohdr->u.imm_data = wqe->wr.ex.imm_data;
314 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
316 bth2 = 1 << 31; /* Request ACK. */
317 if (++qp->s_cur == qp->s_size)
321 case IB_WR_RDMA_WRITE:
322 if (newreq && qp->s_lsn != (u32) -1)
325 case IB_WR_RDMA_WRITE_WITH_IMM:
326 /* If no credit, return. */
327 if (qp->s_lsn != (u32) -1 &&
328 ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
330 ohdr->u.rc.reth.vaddr =
331 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
332 ohdr->u.rc.reth.rkey =
333 cpu_to_be32(wqe->wr.wr.rdma.rkey);
334 ohdr->u.rc.reth.length = cpu_to_be32(len);
335 hwords += sizeof(struct ib_reth) / sizeof(u32);
336 wqe->lpsn = wqe->psn;
338 wqe->lpsn += (len - 1) / pmtu;
339 qp->s_state = OP(RDMA_WRITE_FIRST);
343 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
344 qp->s_state = OP(RDMA_WRITE_ONLY);
347 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
348 /* Immediate data comes after the RETH */
349 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
351 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
354 bth2 = 1 << 31; /* Request ACK. */
355 if (++qp->s_cur == qp->s_size)
359 case IB_WR_RDMA_READ:
361 * Don't allow more operations to be started
362 * than the QP limits allow.
365 if (qp->s_num_rd_atomic >=
366 qp->s_max_rd_atomic) {
367 qp->s_flags |= IPATH_S_RDMAR_PENDING;
370 qp->s_num_rd_atomic++;
371 if (qp->s_lsn != (u32) -1)
374 * Adjust s_next_psn to count the
375 * expected number of responses.
378 qp->s_next_psn += (len - 1) / pmtu;
379 wqe->lpsn = qp->s_next_psn++;
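/*
 * Example of the PSN accounting above (hypothetical numbers): a
 * 10000 byte read with a 2048 byte pmtu yields five response
 * packets, so s_next_psn advances by (10000 - 1) / 2048 = 4 here
 * plus one more for wqe->lpsn, reserving one PSN per expected
 * RDMA READ response packet.
 */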
381 ohdr->u.rc.reth.vaddr =
382 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
383 ohdr->u.rc.reth.rkey =
384 cpu_to_be32(wqe->wr.wr.rdma.rkey);
385 ohdr->u.rc.reth.length = cpu_to_be32(len);
386 qp->s_state = OP(RDMA_READ_REQUEST);
387 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
390 if (++qp->s_cur == qp->s_size)
394 case IB_WR_ATOMIC_CMP_AND_SWP:
395 case IB_WR_ATOMIC_FETCH_AND_ADD:
397 * Don't allow more operations to be started
398 * than the QP limits allow.
401 if (qp->s_num_rd_atomic >=
402 qp->s_max_rd_atomic) {
403 qp->s_flags |= IPATH_S_RDMAR_PENDING;
406 qp->s_num_rd_atomic++;
407 if (qp->s_lsn != (u32) -1)
409 wqe->lpsn = wqe->psn;
411 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
412 qp->s_state = OP(COMPARE_SWAP);
413 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
414 wqe->wr.wr.atomic.swap);
415 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
416 wqe->wr.wr.atomic.compare_add);
418 qp->s_state = OP(FETCH_ADD);
419 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
420 wqe->wr.wr.atomic.compare_add);
421 ohdr->u.atomic_eth.compare_data = 0;
423 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
424 wqe->wr.wr.atomic.remote_addr >> 32);
425 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
426 wqe->wr.wr.atomic.remote_addr);
427 ohdr->u.atomic_eth.rkey = cpu_to_be32(
428 wqe->wr.wr.atomic.rkey);
429 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
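/*
 * The AtomicETH is assembled field by field above: the swap/compare
 * (or add) operands, then the 64-bit remote address split into two
 * big-endian 32-bit words (presumably because the vaddr field is not
 * guaranteed to be 8-byte aligned within the header), then the rkey.
 */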
432 if (++qp->s_cur == qp->s_size)
439 qp->s_sge.sge = wqe->sg_list[0];
440 qp->s_sge.sg_list = wqe->sg_list + 1;
441 qp->s_sge.num_sge = wqe->wr.num_sge;
442 qp->s_len = wqe->length;
445 if (qp->s_tail >= qp->s_size)
448 bth2 |= qp->s_psn & IPATH_PSN_MASK;
449 if (wqe->wr.opcode == IB_WR_RDMA_READ)
450 qp->s_psn = wqe->lpsn + 1;
453 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
454 qp->s_next_psn = qp->s_psn;
457 * Put the QP on the pending list so lost ACKs will cause
458 * a retry. More than one request can be pending so the
459 * QP may already be on the dev->pending list.
461 spin_lock(&dev->pending_lock);
462 if (list_empty(&qp->timerwait))
463 list_add_tail(&qp->timerwait,
464 &dev->pending[dev->pending_index]);
465 spin_unlock(&dev->pending_lock);
468 case OP(RDMA_READ_RESPONSE_FIRST):
470 * This case can only happen if a send is restarted.
471 * See ipath_restart_rc().
473 ipath_init_restart(qp, wqe);
476 qp->s_state = OP(SEND_MIDDLE);
478 case OP(SEND_MIDDLE):
479 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
480 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
481 qp->s_next_psn = qp->s_psn;
488 if (wqe->wr.opcode == IB_WR_SEND)
489 qp->s_state = OP(SEND_LAST);
491 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
492 /* Immediate data comes after the BTH */
493 ohdr->u.imm_data = wqe->wr.ex.imm_data;
496 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
498 bth2 |= 1 << 31; /* Request ACK. */
500 if (qp->s_cur >= qp->s_size)
504 case OP(RDMA_READ_RESPONSE_LAST):
506 * This case can only happen if an RDMA write is restarted.
507 * See ipath_restart_rc().
509 ipath_init_restart(qp, wqe);
511 case OP(RDMA_WRITE_FIRST):
512 qp->s_state = OP(RDMA_WRITE_MIDDLE);
514 case OP(RDMA_WRITE_MIDDLE):
515 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
516 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
517 qp->s_next_psn = qp->s_psn;
524 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
525 qp->s_state = OP(RDMA_WRITE_LAST);
527 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
528 /* Immediate data comes after the BTH */
529 ohdr->u.imm_data = wqe->wr.ex.imm_data;
531 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
534 bth2 |= 1 << 31; /* Request ACK. */
536 if (qp->s_cur >= qp->s_size)
540 case OP(RDMA_READ_RESPONSE_MIDDLE):
542 * This case can only happen if an RDMA read is restarted.
543 * See ipath_restart_rc().
545 ipath_init_restart(qp, wqe);
546 len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
547 ohdr->u.rc.reth.vaddr =
548 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
549 ohdr->u.rc.reth.rkey =
550 cpu_to_be32(wqe->wr.wr.rdma.rkey);
551 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
552 qp->s_state = OP(RDMA_READ_REQUEST);
553 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
554 bth2 = qp->s_psn++ & IPATH_PSN_MASK;
555 if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
556 qp->s_next_psn = qp->s_psn;
560 if (qp->s_cur == qp->s_size)
564 if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
565 bth2 |= 1 << 31; /* Request ACK. */
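/*
 * Besides the explicit ack-request cases above, an ACK is also
 * requested once roughly IPATH_PSN_CREDIT packets have been sent
 * without one, so s_last_psn keeps advancing and the sender does not
 * stall on the s_wait_credit check at the top of this function.
 */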
567 qp->s_hdrwords = hwords;
569 qp->s_cur_size = len;
570 ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
574 spin_unlock_irqrestore(&qp->s_lock, flags);
579 * send_rc_ack - Construct an ACK packet and send it
580 * @qp: a pointer to the QP
582 * This is called from ipath_rc_rcv() and only uses the receive side QP state.
584 * Note that RDMA reads and atomics are handled in the
585 * send side QP state and tasklet.
587 static void send_rc_ack(struct ipath_qp *qp)
589 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
590 struct ipath_devdata *dd;
595 struct ipath_ib_header hdr;
596 struct ipath_other_headers *ohdr;
599 spin_lock_irqsave(&qp->s_lock, flags);
601 /* Don't send an ACK or NAK if an RDMA read or atomic is pending. */
602 if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
603 (qp->s_flags & IPATH_S_ACK_PENDING) ||
604 qp->s_ack_state != OP(ACKNOWLEDGE))
607 spin_unlock_irqrestore(&qp->s_lock, flags);
610 piobuf = ipath_getpiobuf(dd, 0, NULL);
613 * We are out of PIO buffers at the moment.
614 * Pass responsibility for sending the ACK to the
615 * send tasklet so that when a PIO buffer becomes
616 * available, the ACK is sent ahead of other outgoing packets.
619 spin_lock_irqsave(&qp->s_lock, flags);
623 /* Construct the header. */
625 lrh0 = IPATH_LRH_BTH;
626 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
628 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
629 hwords += ipath_make_grh(dev, &hdr.u.l.grh,
630 &qp->remote_ah_attr.grh,
633 lrh0 = IPATH_LRH_GRH;
635 /* read pkey_index w/o lock (it's atomic) */
636 bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
637 (OP(ACKNOWLEDGE) << 24) | (1 << 22);
639 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
641 IPATH_AETH_CREDIT_SHIFT));
643 ohdr->u.aeth = ipath_compute_aeth(qp);
644 lrh0 |= qp->remote_ah_attr.sl << 4;
645 hdr.lrh[0] = cpu_to_be16(lrh0);
646 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
647 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
648 hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
649 ohdr->bth[0] = cpu_to_be32(bth0);
650 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
651 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
653 writeq(hwords + 1, piobuf);
655 if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
656 u32 *hdrp = (u32 *) &hdr;
659 __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
661 __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
663 __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
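/*
 * With a write-combining PIO mapping the header copy is split so the
 * final word can be written (and flushed) separately, ensuring the
 * chip never sees a partially written packet; without WC a single
 * __iowrite32_copy() of the whole header is sufficient.
 */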
667 dev->n_unicast_xmit++;
672 qp->s_flags |= IPATH_S_ACK_PENDING;
673 qp->s_nak_state = qp->r_nak_state;
674 qp->s_ack_psn = qp->r_ack_psn;
675 spin_unlock_irqrestore(&qp->s_lock, flags);
677 /* Call ipath_do_rc_send() in another thread. */
678 tasklet_hi_schedule(&qp->s_task);
685 * reset_psn - reset the QP state to send starting from PSN
687 * @psn: the packet sequence number to restart at
689 * This is called from ipath_rc_rcv() to process an incoming RC ACK for the given QP.
691 * Called at interrupt level with the QP s_lock held.
693 static void reset_psn(struct ipath_qp *qp, u32 psn)
696 struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
702 * If we are starting the request from the beginning,
703 * let the normal send code handle initialization.
705 if (ipath_cmp24(psn, wqe->psn) <= 0) {
706 qp->s_state = OP(SEND_LAST);
710 /* Find the work request opcode corresponding to the given PSN. */
711 opcode = wqe->wr.opcode;
715 if (++n == qp->s_size)
719 wqe = get_swqe_ptr(qp, n);
720 diff = ipath_cmp24(psn, wqe->psn);
725 * If we are starting the request from the beginning,
726 * let the normal send code handle initialization.
729 qp->s_state = OP(SEND_LAST);
732 opcode = wqe->wr.opcode;
736 * Set the state to restart in the middle of a request.
737 * Don't change the s_sge, s_cur_sge, or s_cur_size.
738 * See ipath_do_rc_send().
742 case IB_WR_SEND_WITH_IMM:
743 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
746 case IB_WR_RDMA_WRITE:
747 case IB_WR_RDMA_WRITE_WITH_IMM:
748 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
751 case IB_WR_RDMA_READ:
752 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
757 * This case shouldn't happen since only the opcodes handled above are valid here.
760 qp->s_state = OP(SEND_LAST);
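/*
 * The RDMA_READ_RESPONSE_* values set above are never sent on the
 * wire from here; they serve as internal markers meaning "restart in
 * the middle of a SEND / RDMA WRITE / RDMA READ" and are handled by
 * the matching cases in ipath_make_rc_req().
 */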
767 * ipath_restart_rc - back up requester to resend the last un-ACKed request
768 * @qp: the QP to restart
769 * @psn: packet sequence number for the request
772 * The QP s_lock should be held and interrupts disabled.
774 void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
776 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
777 struct ipath_ibdev *dev;
779 if (qp->s_retry == 0) {
780 ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
781 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
787 * Remove the QP from the timeout queue.
788 * Note: it may already have been removed by ipath_ib_timer().
790 dev = to_idev(qp->ibqp.device);
791 spin_lock(&dev->pending_lock);
792 if (!list_empty(&qp->timerwait))
793 list_del_init(&qp->timerwait);
794 if (!list_empty(&qp->piowait))
795 list_del_init(&qp->piowait);
796 spin_unlock(&dev->pending_lock);
798 if (wqe->wr.opcode == IB_WR_RDMA_READ)
801 dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
804 tasklet_hi_schedule(&qp->s_task);
810 static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
812 if (qp->s_last_psn != psn) {
813 qp->s_last_psn = psn;
814 if (qp->s_wait_credit) {
815 qp->s_wait_credit = 0;
816 tasklet_hi_schedule(&qp->s_task);
822 * do_rc_ack - process an incoming RC ACK
823 * @qp: the QP the ACK came in on
824 * @psn: the packet sequence number of the ACK
825 * @opcode: the opcode of the request that resulted in the ACK
827 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK for the given QP.
829 * Called at interrupt level with the QP s_lock held and interrupts disabled.
830 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
832 static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
835 struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
837 enum ib_wc_status status;
838 struct ipath_swqe *wqe;
844 * Remove the QP from the timeout queue (or RNR timeout queue).
845 * If ipath_ib_timer() has already removed it,
846 * it's OK since we hold the QP s_lock and ipath_restart_rc()
847 * just won't find anything to restart if we ACK everything.
849 spin_lock(&dev->pending_lock);
850 if (!list_empty(&qp->timerwait))
851 list_del_init(&qp->timerwait);
852 spin_unlock(&dev->pending_lock);
855 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
856 * requests and implicitly NAK RDMA read and atomic requests issued
857 * before the NAK'ed request. The MSN won't include the NAK'ed
858 * request but will include any ACK'ed requests.
863 wqe = get_swqe_ptr(qp, qp->s_last);
866 * The MSN might be for a later WQE than the PSN indicates so
867 * only complete WQEs that the PSN finishes.
869 while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
871 * RDMA_READ_RESPONSE_ONLY is a special case since
872 * we want to generate completion events for everything
873 * before the RDMA read, copy the data, then generate
874 * the completion for the read.
876 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
877 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
883 * If this request is an RDMA read or atomic, and the ACK is
884 * for a later operation, this ACK NAKs the RDMA read or
885 * atomic. In other words, only an RDMA_READ_LAST or ONLY
886 * can ACK an RDMA read and likewise for atomic ops. Note
887 * that the NAK case can only happen if relaxed ordering is
888 * used and requests are sent after an RDMA read or atomic
889 * is sent but before the response is received.
891 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
892 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
893 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
894 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
895 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
897 * The last valid PSN seen is the previous request's.
900 update_last_psn(qp, wqe->psn - 1);
901 /* Retry this request. */
902 ipath_restart_rc(qp, wqe->psn);
904 * No need to process the ACK/NAK since we are
905 * restarting an earlier request.
909 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
910 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
911 *(u64 *) wqe->sg_list[0].vaddr = val;
912 if (qp->s_num_rd_atomic &&
913 (wqe->wr.opcode == IB_WR_RDMA_READ ||
914 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
915 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
916 qp->s_num_rd_atomic--;
917 /* Restart sending task if fence is complete */
918 if ((qp->s_flags & IPATH_S_FENCE_PENDING) &&
919 !qp->s_num_rd_atomic) {
920 qp->s_flags &= ~IPATH_S_FENCE_PENDING;
921 tasklet_hi_schedule(&qp->s_task);
922 } else if (qp->s_flags & IPATH_S_RDMAR_PENDING) {
923 qp->s_flags &= ~IPATH_S_RDMAR_PENDING;
924 tasklet_hi_schedule(&qp->s_task);
927 /* Post a send completion queue entry if requested. */
928 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
929 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
930 memset(&wc, 0, sizeof wc);
931 wc.wr_id = wqe->wr.wr_id;
932 wc.status = IB_WC_SUCCESS;
933 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
934 wc.byte_len = wqe->length;
936 wc.src_qp = qp->remote_qpn;
937 wc.slid = qp->remote_ah_attr.dlid;
938 wc.sl = qp->remote_ah_attr.sl;
939 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
941 qp->s_retry = qp->s_retry_cnt;
943 * If we are completing a request which is in the process of
944 * being resent, we can stop resending it since we know the
945 * responder has already seen it.
947 if (qp->s_last == qp->s_cur) {
948 if (++qp->s_cur >= qp->s_size)
950 qp->s_last = qp->s_cur;
951 if (qp->s_last == qp->s_tail)
953 wqe = get_swqe_ptr(qp, qp->s_cur);
954 qp->s_state = OP(SEND_LAST);
955 qp->s_psn = wqe->psn;
957 if (++qp->s_last >= qp->s_size)
959 if (qp->s_last == qp->s_tail)
961 wqe = get_swqe_ptr(qp, qp->s_last);
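/*
 * AETH layout as decoded below: bits 31:29 select ACK (0), RNR NAK
 * (1) or NAK (3); the rest of the syndrome holds the credit count or
 * NAK/RNR code (extracted with IPATH_AETH_CREDIT_SHIFT/MASK); bits
 * 23:0 carry the responder's MSN.
 */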
965 switch (aeth >> 29) {
968 /* If this is a partial ACK, reset the retransmit timer. */
969 if (qp->s_last != qp->s_tail) {
970 spin_lock(&dev->pending_lock);
971 if (list_empty(&qp->timerwait))
972 list_add_tail(&qp->timerwait,
973 &dev->pending[dev->pending_index]);
974 spin_unlock(&dev->pending_lock);
976 * If we get a partial ACK for a resent operation,
977 * we can stop resending the earlier packets and
978 * continue with the next packet the receiver wants.
980 if (ipath_cmp24(qp->s_psn, psn) <= 0) {
981 reset_psn(qp, psn + 1);
982 tasklet_hi_schedule(&qp->s_task);
984 } else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
985 qp->s_state = OP(SEND_LAST);
988 ipath_get_credit(qp, aeth);
989 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
990 qp->s_retry = qp->s_retry_cnt;
991 update_last_psn(qp, psn);
995 case 1: /* RNR NAK */
997 if (qp->s_last == qp->s_tail)
999 if (qp->s_rnr_retry == 0) {
1000 status = IB_WC_RNR_RETRY_EXC_ERR;
1003 if (qp->s_rnr_retry_cnt < 7)
1006 /* The last valid PSN is the previous PSN. */
1007 update_last_psn(qp, psn - 1);
1009 if (wqe->wr.opcode == IB_WR_RDMA_READ)
1010 dev->n_rc_resends++;
1012 dev->n_rc_resends +=
1013 (qp->s_psn - psn) & IPATH_PSN_MASK;
1018 ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
1019 IPATH_AETH_CREDIT_MASK];
1020 ipath_insert_rnr_queue(qp);
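/*
 * The 5-bit RNR timeout code from the AETH indexes ib_ipath_rnr_table
 * to get the delay before this request may be retried;
 * ipath_insert_rnr_queue() parks the QP on the RNR wait list until
 * that delay expires.
 */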
1024 if (qp->s_last == qp->s_tail)
1026 /* The last valid PSN is the previous PSN. */
1027 update_last_psn(qp, psn - 1);
1028 switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
1029 IPATH_AETH_CREDIT_MASK) {
1030 case 0: /* PSN sequence error */
1033 * Back up to the responder's expected PSN.
1034 * Note that we might get a NAK in the middle of an
1035 * RDMA READ response which terminates the RDMA READ.
1038 ipath_restart_rc(qp, psn);
1041 case 1: /* Invalid Request */
1042 status = IB_WC_REM_INV_REQ_ERR;
1043 dev->n_other_naks++;
1046 case 2: /* Remote Access Error */
1047 status = IB_WC_REM_ACCESS_ERR;
1048 dev->n_other_naks++;
1051 case 3: /* Remote Operation Error */
1052 status = IB_WC_REM_OP_ERR;
1053 dev->n_other_naks++;
1055 ipath_send_complete(qp, wqe, status);
1056 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1060 /* Ignore other reserved NAK error codes */
1063 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1066 default: /* 2: reserved */
1068 /* Ignore reserved NAK codes. */
1077 * ipath_rc_rcv_resp - process an incoming RC response packet
1078 * @dev: the device this packet came in on
1079 * @ohdr: the other headers for this packet
1080 * @data: the packet data
1081 * @tlen: the packet length
1082 * @qp: the QP for this packet
1083 * @opcode: the opcode for this packet
1084 * @psn: the packet sequence number for this packet
1085 * @hdrsize: the header length
1086 * @pmtu: the path MTU
1087 * @header_in_data: true if part of the header data is in the data buffer
1089 * This is called from ipath_rc_rcv() to process an incoming RC response
1090 * packet for the given QP.
1091 * Called at interrupt level.
1093 static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1094 struct ipath_other_headers *ohdr,
1095 void *data, u32 tlen,
1096 struct ipath_qp *qp,
1098 u32 psn, u32 hdrsize, u32 pmtu,
1101 struct ipath_swqe *wqe;
1102 enum ib_wc_status status;
1103 unsigned long flags;
1109 spin_lock_irqsave(&qp->s_lock, flags);
1111 /* Ignore invalid responses. */
1112 if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
1115 /* Ignore duplicate responses. */
1116 diff = ipath_cmp24(psn, qp->s_last_psn);
1117 if (unlikely(diff <= 0)) {
1118 /* Update credits for "ghost" ACKs */
1119 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1120 if (!header_in_data)
1121 aeth = be32_to_cpu(ohdr->u.aeth);
1123 aeth = be32_to_cpu(((__be32 *) data)[0]);
1124 data += sizeof(__be32);
1126 if ((aeth >> 29) == 0)
1127 ipath_get_credit(qp, aeth);
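/*
 * Even though the duplicate response is otherwise ignored, an ACK
 * (aeth code 0) may still carry updated flow-control credits, so
 * they are harvested here before bailing out.
 */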
1132 if (unlikely(qp->s_last == qp->s_tail))
1134 wqe = get_swqe_ptr(qp, qp->s_last);
1135 status = IB_WC_SUCCESS;
1138 case OP(ACKNOWLEDGE):
1139 case OP(ATOMIC_ACKNOWLEDGE):
1140 case OP(RDMA_READ_RESPONSE_FIRST):
1141 if (!header_in_data)
1142 aeth = be32_to_cpu(ohdr->u.aeth);
1144 aeth = be32_to_cpu(((__be32 *) data)[0]);
1145 data += sizeof(__be32);
1147 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
1148 if (!header_in_data) {
1149 __be32 *p = ohdr->u.at.atomic_ack_eth;
1151 val = ((u64) be32_to_cpu(p[0]) << 32) |
1154 val = be64_to_cpu(((__be64 *) data)[0]);
1157 if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
1158 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1161 wqe = get_swqe_ptr(qp, qp->s_last);
1162 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1165 * If this is a response to a resent RDMA read, we
1166 * have to be careful to copy the data to the right location.
1169 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1173 case OP(RDMA_READ_RESPONSE_MIDDLE):
1174 /* no AETH, no ACK */
1175 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1177 ipath_restart_rc(qp, qp->s_last_psn + 1);
1180 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1183 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1185 if (unlikely(pmtu >= qp->s_rdma_read_len))
1188 /* We got a response so update the timeout. */
1189 spin_lock(&dev->pending_lock);
1190 if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
1191 list_move_tail(&qp->timerwait,
1192 &dev->pending[dev->pending_index]);
1193 spin_unlock(&dev->pending_lock);
1195 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1196 qp->s_retry = qp->s_retry_cnt;
1199 * Update the RDMA receive state but do the copy w/o
1200 * holding the locks and blocking interrupts.
1202 qp->s_rdma_read_len -= pmtu;
1203 update_last_psn(qp, psn);
1204 spin_unlock_irqrestore(&qp->s_lock, flags);
1205 ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
1208 case OP(RDMA_READ_RESPONSE_ONLY):
1209 if (!header_in_data)
1210 aeth = be32_to_cpu(ohdr->u.aeth);
1212 aeth = be32_to_cpu(((__be32 *) data)[0]);
1213 if (!do_rc_ack(qp, aeth, psn, opcode, 0))
1215 /* Get the number of bytes the message was padded by. */
1216 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1218 * Check that the data size is >= 0 && <= pmtu.
1219 * Remember to account for the AETH header (4) and ICRC (4).
1222 if (unlikely(tlen < (hdrsize + pad + 8)))
1225 * If this is a response to a resent RDMA read, we
1226 * have to be careful to copy the data to the right location.
1229 wqe = get_swqe_ptr(qp, qp->s_last);
1230 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1234 case OP(RDMA_READ_RESPONSE_LAST):
1235 /* ACKs READ req. */
1236 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1238 ipath_restart_rc(qp, qp->s_last_psn + 1);
1241 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1243 /* Get the number of bytes the message was padded by. */
1244 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1246 * Check that the data size is >= 1 && <= pmtu.
1247 * Remember to account for the AETH header (4) and ICRC (4).
1250 if (unlikely(tlen <= (hdrsize + pad + 8)))
1253 tlen -= hdrsize + pad + 8;
1254 if (unlikely(tlen != qp->s_rdma_read_len))
1256 if (!header_in_data)
1257 aeth = be32_to_cpu(ohdr->u.aeth);
1259 aeth = be32_to_cpu(((__be32 *) data)[0]);
1260 data += sizeof(__be32);
1262 ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
1263 (void) do_rc_ack(qp, aeth, psn,
1264 OP(RDMA_READ_RESPONSE_LAST), 0);
1269 status = IB_WC_LOC_QP_OP_ERR;
1273 status = IB_WC_LOC_LEN_ERR;
1275 ipath_send_complete(qp, wqe, status);
1276 ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1278 spin_unlock_irqrestore(&qp->s_lock, flags);
1284 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
1285 * @dev: the device this packet came in on
1286 * @ohdr: the other headers for this packet
1287 * @data: the packet data
1288 * @qp: the QP for this packet
1289 * @opcode: the opcode for this packet
1290 * @psn: the packet sequence number for this packet
1291 * @diff: the difference between the PSN and the expected PSN
1292 * @header_in_data: true if part of the header data is in the data buffer
1294 * This is called from ipath_rc_rcv() to process an unexpected
1295 * incoming RC packet for the given QP.
1296 * Called at interrupt level.
1297 * Return 1 if no more processing is needed; otherwise return 0 to
1298 * schedule a response to be sent.
1300 static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1301 struct ipath_other_headers *ohdr,
1303 struct ipath_qp *qp,
1309 struct ipath_ack_entry *e;
1312 unsigned long flags;
1316 * Packet sequence error.
1317 * A NAK will ACK earlier sends and RDMA writes.
1318 * Don't queue the NAK if we already sent one.
1320 if (!qp->r_nak_state) {
1321 qp->r_nak_state = IB_NAK_PSN_ERROR;
1322 /* Use the expected PSN. */
1323 qp->r_ack_psn = qp->r_psn;
1330 * Handle a duplicate request. Don't re-execute SEND, RDMA
1331 * write or atomic op. Don't NAK errors, just silently drop
1332 * the duplicate request. Note that r_sge, r_len, and
1333 * r_rcv_len may be in use so don't modify them.
1335 * We are supposed to ACK the earliest duplicate PSN but we
1336 * can coalesce an outstanding duplicate ACK. We have to
1337 * send the earliest so that RDMA reads can be restarted at
1338 * the requester's expected PSN.
1340 * First, find where this duplicate PSN falls within the
1341 * ACKs previously sent.
1343 psn &= IPATH_PSN_MASK;
1346 spin_lock_irqsave(&qp->s_lock, flags);
1347 for (i = qp->r_head_ack_queue; ; i = prev) {
1348 if (i == qp->s_tail_ack_queue)
1353 prev = IPATH_MAX_RDMA_ATOMIC;
1354 if (prev == qp->r_head_ack_queue) {
1358 e = &qp->s_ack_queue[prev];
1363 if (ipath_cmp24(psn, e->psn) >= 0) {
1364 if (prev == qp->s_tail_ack_queue)
1370 case OP(RDMA_READ_REQUEST): {
1371 struct ib_reth *reth;
1376 * If we didn't find the RDMA read request in the ack queue,
1377 * or the send tasklet is already backed up to send an
1378 * earlier entry, we can ignore this request.
1380 if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
1382 /* RETH comes after BTH */
1383 if (!header_in_data)
1384 reth = &ohdr->u.rc.reth;
1386 reth = (struct ib_reth *)data;
1387 data += sizeof(*reth);
1390 * Address range must be a subset of the original
1391 * request and start on pmtu boundaries.
1392 * We reuse the old ack_queue slot since the requester
1393 * should not back up and request an earlier PSN for the same request.
1396 offset = ((psn - e->psn) & IPATH_PSN_MASK) *
1397 ib_mtu_enum_to_int(qp->path_mtu);
1398 len = be32_to_cpu(reth->length);
1399 if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
1402 u32 rkey = be32_to_cpu(reth->rkey);
1403 u64 vaddr = be64_to_cpu(reth->vaddr);
1406 ok = ipath_rkey_ok(qp, &e->rdma_sge,
1408 IB_ACCESS_REMOTE_READ);
1412 e->rdma_sge.sg_list = NULL;
1413 e->rdma_sge.num_sge = 0;
1414 e->rdma_sge.sge.mr = NULL;
1415 e->rdma_sge.sge.vaddr = NULL;
1416 e->rdma_sge.sge.length = 0;
1417 e->rdma_sge.sge.sge_length = 0;
1420 qp->s_ack_state = OP(ACKNOWLEDGE);
1421 qp->s_tail_ack_queue = prev;
1425 case OP(COMPARE_SWAP):
1426 case OP(FETCH_ADD): {
1428 * If we didn't find the atomic request in the ack queue
1429 * or the send tasklet is already backed up to send an
1430 * earlier entry, we can ignore this request.
1432 if (!e || e->opcode != (u8) opcode || old_req)
1434 qp->s_ack_state = OP(ACKNOWLEDGE);
1435 qp->s_tail_ack_queue = prev;
1443 * Resend the most recent ACK if this request is
1444 * after all the previous RDMA reads and atomics.
1446 if (i == qp->r_head_ack_queue) {
1447 spin_unlock_irqrestore(&qp->s_lock, flags);
1448 qp->r_nak_state = 0;
1449 qp->r_ack_psn = qp->r_psn - 1;
1453 * Try to send a simple ACK to work around a Mellanox bug
1454 * which doesn't accept an RDMA read response or atomic
1455 * response as an ACK for earlier SENDs or RDMA writes.
1457 if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
1458 !(qp->s_flags & IPATH_S_ACK_PENDING) &&
1459 qp->s_ack_state == OP(ACKNOWLEDGE)) {
1460 spin_unlock_irqrestore(&qp->s_lock, flags);
1461 qp->r_nak_state = 0;
1462 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1466 * Resend the RDMA read or atomic op which
1467 * ACKs this duplicate request.
1469 qp->s_ack_state = OP(ACKNOWLEDGE);
1470 qp->s_tail_ack_queue = i;
1473 qp->r_nak_state = 0;
1474 tasklet_hi_schedule(&qp->s_task);
1477 spin_unlock_irqrestore(&qp->s_lock, flags);
1485 void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
1487 unsigned long flags;
1490 spin_lock_irqsave(&qp->s_lock, flags);
1491 lastwqe = ipath_error_qp(qp, err);
1492 spin_unlock_irqrestore(&qp->s_lock, flags);
1497 ev.device = qp->ibqp.device;
1498 ev.element.qp = &qp->ibqp;
1499 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1500 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1504 static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
1506 unsigned long flags;
1510 if (next > IPATH_MAX_RDMA_ATOMIC)
1512 spin_lock_irqsave(&qp->s_lock, flags);
1513 if (n == qp->s_tail_ack_queue) {
1514 qp->s_tail_ack_queue = next;
1515 qp->s_ack_state = OP(ACKNOWLEDGE);
1517 spin_unlock_irqrestore(&qp->s_lock, flags);
1521 * ipath_rc_rcv - process an incoming RC packet
1522 * @dev: the device this packet came in on
1523 * @hdr: the header of this packet
1524 * @has_grh: true if the header has a GRH
1525 * @data: the packet data
1526 * @tlen: the packet length
1527 * @qp: the QP for this packet
1529 * This is called from ipath_qp_rcv() to process an incoming RC packet for the given QP.
1531 * Called at interrupt level.
1533 void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1534 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
1536 struct ipath_other_headers *ohdr;
1542 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
1544 struct ib_reth *reth;
1547 /* Validate the SLID. See Ch. 9.6.1.5 */
1548 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
1554 hdrsize = 8 + 12; /* LRH + BTH */
1555 psn = be32_to_cpu(ohdr->bth[2]);
1558 ohdr = &hdr->u.l.oth;
1559 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1561 * The header with GRH is 60 bytes and the core driver sets
1562 * the eager header buffer size to 56 bytes so the last 4
1563 * bytes of the BTH header (the PSN) are in the data buffer.
1565 header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
1566 if (header_in_data) {
1567 psn = be32_to_cpu(((__be32 *) data)[0]);
1568 data += sizeof(__be32);
1570 psn = be32_to_cpu(ohdr->bth[2]);
1574 * Process responses (ACKs) before anything else. Note that the
1575 * packet sequence number will be for something in the send work
1576 * queue rather than the expected receive packet sequence number.
1577 * In other words, this QP is the requester.
1579 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1580 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1581 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1582 ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
1583 hdrsize, pmtu, header_in_data);
1587 /* Compute 24 bits worth of difference. */
1588 diff = ipath_cmp24(psn, qp->r_psn);
1589 if (unlikely(diff)) {
1590 if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
1591 psn, diff, header_in_data))
1596 /* Check for opcode sequence errors. */
1597 switch (qp->r_state) {
1598 case OP(SEND_FIRST):
1599 case OP(SEND_MIDDLE):
1600 if (opcode == OP(SEND_MIDDLE) ||
1601 opcode == OP(SEND_LAST) ||
1602 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1606 case OP(RDMA_WRITE_FIRST):
1607 case OP(RDMA_WRITE_MIDDLE):
1608 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1609 opcode == OP(RDMA_WRITE_LAST) ||
1610 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1615 if (opcode == OP(SEND_MIDDLE) ||
1616 opcode == OP(SEND_LAST) ||
1617 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1618 opcode == OP(RDMA_WRITE_MIDDLE) ||
1619 opcode == OP(RDMA_WRITE_LAST) ||
1620 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1623 * Note that it is up to the requester to not send a new
1624 * RDMA read or atomic operation before receiving an ACK
1625 * for the previous operation.
1630 memset(&wc, 0, sizeof wc);
1632 /* OK, process the packet. */
1634 case OP(SEND_FIRST):
1635 if (!ipath_get_rwqe(qp, 0))
1639 case OP(SEND_MIDDLE):
1640 case OP(RDMA_WRITE_MIDDLE):
1642 /* Check for invalid length PMTU or posted rwqe len. */
1643 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1645 qp->r_rcv_len += pmtu;
1646 if (unlikely(qp->r_rcv_len > qp->r_len))
1648 ipath_copy_sge(&qp->r_sge, data, pmtu);
1651 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1653 if (!ipath_get_rwqe(qp, 1))
1658 case OP(SEND_ONLY_WITH_IMMEDIATE):
1659 if (!ipath_get_rwqe(qp, 0))
1662 if (opcode == OP(SEND_ONLY))
1665 case OP(SEND_LAST_WITH_IMMEDIATE):
1667 if (header_in_data) {
1668 wc.imm_data = *(__be32 *) data;
1669 data += sizeof(__be32);
1671 /* Immediate data comes after BTH */
1672 wc.imm_data = ohdr->u.imm_data;
1675 wc.wc_flags = IB_WC_WITH_IMM;
1678 case OP(RDMA_WRITE_LAST):
1680 /* Get the number of bytes the message was padded by. */
1681 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1682 /* Check for invalid length. */
1683 /* XXX LAST len should be >= 1 */
1684 if (unlikely(tlen < (hdrsize + pad + 4)))
1686 /* Don't count the CRC. */
1687 tlen -= (hdrsize + pad + 4);
1688 wc.byte_len = tlen + qp->r_rcv_len;
1689 if (unlikely(wc.byte_len > qp->r_len))
1691 ipath_copy_sge(&qp->r_sge, data, tlen);
1693 if (!qp->r_wrid_valid)
1695 qp->r_wrid_valid = 0;
1696 wc.wr_id = qp->r_wr_id;
1697 wc.status = IB_WC_SUCCESS;
1698 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
1699 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
1700 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
1702 wc.opcode = IB_WC_RECV;
1704 wc.src_qp = qp->remote_qpn;
1705 wc.slid = qp->remote_ah_attr.dlid;
1706 wc.sl = qp->remote_ah_attr.sl;
1707 /* Signal completion event if the solicited bit is set. */
1708 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
1710 __constant_cpu_to_be32(1 << 23)) != 0);
1713 case OP(RDMA_WRITE_FIRST):
1714 case OP(RDMA_WRITE_ONLY):
1715 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1716 if (unlikely(!(qp->qp_access_flags &
1717 IB_ACCESS_REMOTE_WRITE)))
1720 /* RETH comes after BTH */
1721 if (!header_in_data)
1722 reth = &ohdr->u.rc.reth;
1724 reth = (struct ib_reth *)data;
1725 data += sizeof(*reth);
1727 hdrsize += sizeof(*reth);
1728 qp->r_len = be32_to_cpu(reth->length);
1730 if (qp->r_len != 0) {
1731 u32 rkey = be32_to_cpu(reth->rkey);
1732 u64 vaddr = be64_to_cpu(reth->vaddr);
1735 /* Check rkey & NAK */
1736 ok = ipath_rkey_ok(qp, &qp->r_sge,
1737 qp->r_len, vaddr, rkey,
1738 IB_ACCESS_REMOTE_WRITE);
1742 qp->r_sge.sg_list = NULL;
1743 qp->r_sge.sge.mr = NULL;
1744 qp->r_sge.sge.vaddr = NULL;
1745 qp->r_sge.sge.length = 0;
1746 qp->r_sge.sge.sge_length = 0;
1748 if (opcode == OP(RDMA_WRITE_FIRST))
1750 else if (opcode == OP(RDMA_WRITE_ONLY))
1752 if (!ipath_get_rwqe(qp, 1))
1756 case OP(RDMA_READ_REQUEST): {
1757 struct ipath_ack_entry *e;
1761 if (unlikely(!(qp->qp_access_flags &
1762 IB_ACCESS_REMOTE_READ)))
1764 next = qp->r_head_ack_queue + 1;
1765 if (next > IPATH_MAX_RDMA_ATOMIC)
1767 if (unlikely(next == qp->s_tail_ack_queue)) {
1768 if (!qp->s_ack_queue[next].sent)
1770 ipath_update_ack_queue(qp, next);
1772 e = &qp->s_ack_queue[qp->r_head_ack_queue];
1773 /* RETH comes after BTH */
1774 if (!header_in_data)
1775 reth = &ohdr->u.rc.reth;
1777 reth = (struct ib_reth *)data;
1778 data += sizeof(*reth);
1780 len = be32_to_cpu(reth->length);
1782 u32 rkey = be32_to_cpu(reth->rkey);
1783 u64 vaddr = be64_to_cpu(reth->vaddr);
1786 /* Check rkey & NAK */
1787 ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
1788 rkey, IB_ACCESS_REMOTE_READ);
1792 * Update the next expected PSN. We add 1 later
1793 * below, so only add the remainder here.
1796 qp->r_psn += (len - 1) / pmtu;
1798 e->rdma_sge.sg_list = NULL;
1799 e->rdma_sge.num_sge = 0;
1800 e->rdma_sge.sge.mr = NULL;
1801 e->rdma_sge.sge.vaddr = NULL;
1802 e->rdma_sge.sge.length = 0;
1803 e->rdma_sge.sge.sge_length = 0;
1809 * We need to increment the MSN here instead of when we
1810 * finish sending the result since a duplicate request would
1811 * increment it more than once.
1815 qp->r_state = opcode;
1816 qp->r_nak_state = 0;
1818 qp->r_head_ack_queue = next;
1820 /* Call ipath_do_rc_send() in another thread. */
1821 tasklet_hi_schedule(&qp->s_task);
1826 case OP(COMPARE_SWAP):
1827 case OP(FETCH_ADD): {
1828 struct ib_atomic_eth *ateth;
1829 struct ipath_ack_entry *e;
1836 if (unlikely(!(qp->qp_access_flags &
1837 IB_ACCESS_REMOTE_ATOMIC)))
1839 next = qp->r_head_ack_queue + 1;
1840 if (next > IPATH_MAX_RDMA_ATOMIC)
1842 if (unlikely(next == qp->s_tail_ack_queue)) {
1843 if (!qp->s_ack_queue[next].sent)
1845 ipath_update_ack_queue(qp, next);
1847 if (!header_in_data)
1848 ateth = &ohdr->u.atomic_eth;
1850 ateth = (struct ib_atomic_eth *)data;
1851 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
1852 be32_to_cpu(ateth->vaddr[1]);
1853 if (unlikely(vaddr & (sizeof(u64) - 1)))
1855 rkey = be32_to_cpu(ateth->rkey);
1856 /* Check rkey & NAK */
1857 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
1858 sizeof(u64), vaddr, rkey,
1859 IB_ACCESS_REMOTE_ATOMIC)))
1861 /* Perform atomic OP and save result. */
1862 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
1863 sdata = be64_to_cpu(ateth->swap_data);
1864 e = &qp->s_ack_queue[qp->r_head_ack_queue];
1865 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
1866 (u64) atomic64_add_return(sdata, maddr) - sdata :
1867 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
1868 be64_to_cpu(ateth->compare_data),
1872 e->psn = psn & IPATH_PSN_MASK;
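/*
 * In both cases e->atomic_data holds the original contents of the
 * target location: atomic64_add_return() returns the post-add value,
 * so sdata is subtracted back out for FETCH_ADD, while cmpxchg()
 * already returns the prior value for COMPARE_SWAP.
 */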
1875 qp->r_state = opcode;
1876 qp->r_nak_state = 0;
1878 qp->r_head_ack_queue = next;
1880 /* Call ipath_do_rc_send() in another thread. */
1881 tasklet_hi_schedule(&qp->s_task);
1887 /* NAK unknown opcodes. */
1891 qp->r_state = opcode;
1892 qp->r_ack_psn = psn;
1893 qp->r_nak_state = 0;
1894 /* Send an ACK if requested or required. */
1895 if (psn & (1 << 31))
1900 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
1901 qp->r_ack_psn = qp->r_psn;
1905 ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
1906 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
1907 qp->r_ack_psn = qp->r_psn;
1911 ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
1912 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
1913 qp->r_ack_psn = qp->r_psn;