Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland...
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 481e4b6bd9494ba2ab39aee17366d556645c8ffd..1a77e79f6b432748accdbb9a813f9a257aa4c1c6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -208,7 +208,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
         * this packet and reuse the old buffer.
         */
        if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
-               ++priv->stats.rx_dropped;
+               ++dev->stats.rx_dropped;
                goto repost;
        }
 
@@ -225,8 +225,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        skb_pull(skb, IPOIB_ENCAP_LEN);
 
        dev->last_rx = jiffies;
-       ++priv->stats.rx_packets;
-       priv->stats.rx_bytes += skb->len;
+       ++dev->stats.rx_packets;
+       dev->stats.rx_bytes += skb->len;
 
        skb->dev = dev;
        /* XXX get correct PACKET_ type here */
@@ -260,8 +260,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        ib_dma_unmap_single(priv->ca, tx_req->mapping,
                            tx_req->skb->len, DMA_TO_DEVICE);
 
-       ++priv->stats.tx_packets;
-       priv->stats.tx_bytes += tx_req->skb->len;
+       ++dev->stats.tx_packets;
+       dev->stats.tx_bytes += tx_req->skb->len;
 
        dev_kfree_skb_any(tx_req->skb);
 
@@ -362,8 +362,8 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
        if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
-               ++priv->stats.tx_dropped;
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_dropped;
+               ++dev->stats.tx_errors;
                ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                return;
        }
@@ -383,7 +383,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
        addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
                                 DMA_TO_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }
@@ -392,7 +392,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
        if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                               address->ah, qpn, addr, skb->len))) {
                ipoib_warn(priv, "post_send failed\n");
-               ++priv->stats.tx_errors;
+               ++dev->stats.tx_errors;
                ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
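
The hunks above all make the same substitution. A minimal sketch of the pattern, assuming a kernel (2.6.22 or later) where struct net_device embeds its own net_device_stats and the core's default get_stats returns &dev->stats; the example_* names below are hypothetical, not IPoIB's:

#include <linux/netdevice.h>

/* Before: the driver kept a private copy of the counters ... */
struct example_priv {
	struct net_device_stats stats;	/* duplicates what the core can hold */
};

/* ... and needed a get_stats hook to expose it. */
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	return &priv->stats;
}

/* After: bump the counters embedded in struct net_device directly;
 * the core's default get_stats already returns &dev->stats, so the
 * driver-private copy and the hook can both go away. */
static void example_count_rx(struct net_device *dev, unsigned int len)
{
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += len;
}
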
@@ -553,6 +553,14 @@ void ipoib_drain_cq(struct net_device *dev)
        do {
                n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
+                       /*
+                        * Convert any successful completions to flush
+                        * errors to avoid passing packets up the
+                        * stack after bringing the device down.
+                        */
+                       if (priv->ibwc[i].status == IB_WC_SUCCESS)
+                               priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
+
                        if (priv->ibwc[i].wr_id & IPOIB_CM_OP_SRQ)
                                ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                        else if (priv->ibwc[i].wr_id & IPOIB_OP_RECV)
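
The comment added in this last hunk is the key to ipoib_drain_cq(): completion handlers deliver packets only on IB_WC_SUCCESS and treat IB_WC_WR_FLUSH_ERR as silent cleanup, so rewriting the status forces any late successful completion down the cleanup path instead of handing an skb to a stack that is being torn down. A hedged sketch of that handler shape (example_handle_rx_wc is hypothetical, not the real ipoib_ib_handle_rx_wc):

#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

static void example_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	if (wc->status == IB_WC_SUCCESS) {
		/* unmap the buffer and hand the skb to the stack */
		return;
	}

	/*
	 * Error path: a flush error means "device going down, just
	 * recycle the buffer"; anything else is worth a warning.
	 */
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		dev_warn(&dev->dev, "rx completion error %d\n", wc->status);

	/* unmap and free/repost the buffer without touching the stack */
}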