/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#include "ipoib.h"

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

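/*
 * Passive (RX) connections sit on an LRU list (cm.passive_ids) and are
 * reaped by a delayed work item once idle longer than IPOIB_CM_RX_TIMEOUT.
 * To avoid taking priv->lock on every completion, an entry's LRU timestamp
 * is refreshed for roughly one receive in four (wr_ids whose
 * IPOIB_CM_RX_UPDATE_MASK bits are clear), and then only if it is at least
 * IPOIB_CM_RX_UPDATE_TIME old.
 */
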
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

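/*
 * Receive WRs are posted to the shared SRQ.  The buffer index is carried
 * in the low bits of wr_id, with IPOIB_CM_OP_SRQ OR'd in so the completion
 * path can tell connected-mode receives apart from other work requests.
 */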
static int ipoib_cm_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_CM_OP_SRQ;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

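/*
 * Each receive buffer is an skb with an IPOIB_CM_HEAD_SIZE linear part
 * (preceded by 12 bytes of headroom so the IP header lands on a 16-byte
 * boundary) plus up to IPOIB_CM_RX_SG - 1 page fragments, matching the
 * scatter list posted to the SRQ.
 */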
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	priv->cm.srq_ring[id].skb = skb;
	return skb;

partial_error:
	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	/* Unwind only the fragments that were actually mapped. */
	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq = priv->cq, /* does not matter, we never send anything */
		.recv_cq = priv->cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* FIXME: 0 Seems not to work */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.target_ack_delay = 20; /* FIXME */
	rep.srq = 1;
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

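/*
 * The private data exchanged in the CM REQ/REP carries our datagram QPN
 * and the receive buffer size we post (IPOIB_CM_BUF_SIZE).  The active
 * side (ipoib_cm_rep_handler()) treats the advertised buffer size as the
 * connection MTU and rejects the connection if it cannot carry the
 * netdevice's current MTU plus the IPoIB encapsulation header.
 */
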
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		goto err_rep;
	}

	cm_id->context = p;
	p->jiffies = jiffies;
	spin_lock_irq(&priv->lock);
	if (list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	list_add(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);
	return 0;

err_rep:
err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;
	int ret;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		spin_lock_irq(&priv->lock);
		if (list_empty(&p->list))
			ret = 0; /* Connection is going away already. */
		else {
			list_del_init(&p->list);
			ret = -ECONNRESET;
		}
		spin_unlock_irq(&priv->lock);
		if (ret) {
			ib_destroy_qp(p->qp);
			kfree(p);
		}
		return ret;
	default:
		return 0;
	}
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

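/*
 * Note the page recycling above: fragments that the received data never
 * reached were not touched by the HCA, so they are handed (still
 * DMA-mapped) to the freshly allocated replacement skb.  Only the pages
 * that actually carried data have to be re-allocated and re-mapped before
 * ipoib_cm_handle_rx_wc() reposts the buffer.
 */
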
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->cm.srq_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++priv->stats.rx_dropped;
		goto repost;
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		p = wc->qp->qp_context;
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do
			 * not re-add it if it has been removed. */
			if (!list_empty(&p->list))
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++priv->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++priv->stats.rx_packets;
	priv->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (unlikely(ipoib_cm_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_cm_post_receive failed "
			   "for buf %d\n", wr_id);
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr = addr;
	priv->tx_sge.length = len;

	priv->tx_wr.wr_id = wr_id;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (tx->tx_head - tx->tx_tail == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			set_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		}
	}
}

static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx,
				  struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++priv->stats.tx_packets;
	priv->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(test_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags)) &&
	    tx->tx_head - tx->tx_tail <= ipoib_sendq_size >> 1) {
		clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags);
		netif_wake_queue(dev);
	}

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		/* queue would be re-started anyway when TX is destroyed,
		 * but it makes sense to do it ASAP here. */
		if (test_and_clear_bit(IPOIB_FLAG_NETIF_STOPPED, &tx->flags))
			netif_wake_queue(dev);

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_completion(struct ib_cq *cq, void *tx_ptr)
{
	struct ipoib_cm_tx *tx = tx_ptr;
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, tx->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_cm_handle_tx_wc(tx->dev, tx, tx->ibwc + i);
	} while (n == IPOIB_NUM_WC);
}

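/*
 * The CQ above is re-armed before it is drained: a completion that
 * arrives after the final ib_poll_cq() still triggers a fresh event (and
 * another call into the handler), so completions cannot be lost.
 * Polling repeats as long as full batches of IPOIB_NUM_WC entries come
 * back.
 */
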
int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		return ret;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		ib_destroy_cm_id(priv->cm.id);
		return ret;
	}
	return 0;
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_del_init(&p->list);
		spin_unlock_irq(&priv->lock);
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
		spin_lock_irq(&priv->lock);
	}
	spin_unlock_irq(&priv->lock);

	cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
			   p->mtu, priv->dev->mtu);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ib_cq *cq)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {};
	attr.recv_cq = priv->cq;
	attr.srq = priv->cm.srq;
	attr.cap.max_send_wr = ipoib_sendq_size;
	attr.cap.max_send_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	attr.qp_type = IB_QPT_RC;
	attr.send_cq = cq;

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = 1;
	return ib_send_cm_req(id, &req);
}

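/*
 * The service ID combines the fixed IETF prefix with the remote datagram
 * QPN, which is exactly what ipoib_cm_dev_open() listens on
 * (IPOIB_CM_IETF_ID | priv->qp->qp_num), so the REQ is routed to the
 * peer's IPoIB CM listener.
 */
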
static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			     GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

	p->cq = ib_create_cq(priv->ca, ipoib_cm_tx_completion, NULL, p,
			     ipoib_sendq_size + 1, 0);
	if (IS_ERR(p->cq)) {
		ret = PTR_ERR(p->cq);
		ipoib_warn(priv, "failed to allocate tx cq: %d\n", ret);
		goto err_cq;
	}

	ret = ib_req_notify_cq(p->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		ipoib_warn(priv, "failed to request completion notification: %d\n", ret);
		goto err_req_notify;
	}

	p->qp = ipoib_cm_create_tx_qp(p->dev, p->cq);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
err_req_notify:
	ib_destroy_cq(p->cq);
err_cq:
	p->cq = NULL;
err_tx:
	return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->qp)
		ib_destroy_qp(p->qp);

	if (p->cq)
		ib_destroy_cq(p->cq);

	if (test_bit(IPOIB_FLAG_NETIF_STOPPED, &p->flags))
		netif_wake_queue(p->dev);

	if (p->tx_ring) {
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
			ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
					    DMA_TO_DEVICE);
			dev_kfree_skb_any(tx_req->skb);
			++p->tx_tail;
		}

		kfree(p->tx_ring);
	}

	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
		tx->neigh = NULL;
	}
}

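/*
 * ipoib_cm_create_tx() runs in the hot path under spinlocks (note the
 * GFP_ATOMIC above) and therefore only queues the connection on
 * cm.start_list; the heavy lifting (CQ/QP creation, path record copy,
 * CM REQ) happens here in workqueue context, where sleeping is allowed.
 */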
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_del_init(&p->list);
		spin_unlock_irq(&priv->lock);
		ib_destroy_cm_id(p->id);
		ib_destroy_qp(p->qp);
		kfree(p);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		dev->mtu = min(priv->mcast_mtu, dev->mtu);
		ipoib_flush_paths(dev);
		return count;
	}

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

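/*
 * This exposes /sys/class/net/<ifname>/mode: writing "connected" or
 * "datagram" switches modes at runtime, and reading it reports the
 * current administrative setting.
 */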
int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = IPOIB_CM_RX_SG
		}
	};
	int ret, i;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		ret = PTR_ERR(priv->cm.srq);
		priv->cm.srq = NULL;
		return ret;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ipoib_cm_dev_cleanup(dev);
		return -ENOMEM;
	}

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].lkey = priv->mr->lkey;

	priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < IPOIB_CM_RX_SG; ++i)
		priv->cm.rx_sge[i].length = PAGE_SIZE;
	priv->cm.rx_wr.next = NULL;
	priv->cm.rx_wr.sg_list = priv->cm.rx_sge;
	priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
					   priv->cm.srq_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -ENOMEM;
		}
		if (ipoib_cm_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_cm_post_receive failed for buf %d\n", i);
			ipoib_cm_dev_cleanup(dev);
			return -EIO;
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;
	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->cm.srq_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      priv->cm.srq_ring[i].mapping);
			dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
			priv->cm.srq_ring[i].skb = NULL;
		}
	kfree(priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}