svcrdma: Free context on ib_post_recv error
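As the subject suggests, svc_rdma_post_recv() now calls svc_rdma_put_context() when ib_post_recv() fails, so the receive context is no longer leaked on that error path. The blobdiff below appears to span additional svcrdma changes as well: the RQ/SQ pending-bit tests and CQ re-arming move from dto_tasklet_func() into rq_cq_reap() and sq_cq_reap(), rdma_cma_handler() drops its transport reference with svc_xprt_put() on disconnect, svc_rdma_release_rqst() no longer reposts a receive WQE, and svc_rdma_send() returns -ENOTCONN rather than 0 when the transport is marked XPT_CLOSE.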
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 16fd3f6718ff55746168c732ec3f367ef9b61f9a..e85ac77f4954860a5bbe5c5f8b5652ea2f31fe0e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -228,23 +228,8 @@ static void dto_tasklet_func(unsigned long data)
                list_del_init(&xprt->sc_dto_q);
                spin_unlock_irqrestore(&dto_lock, flags);
 
-               if (test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags)) {
-                       ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
-                       rq_cq_reap(xprt);
-                       set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
-                       /*
-                        * If data arrived before established event,
-                        * don't enqueue. This defers RPC I/O until the
-                        * RDMA connection is complete.
-                        */
-                       if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
-                               svc_xprt_enqueue(&xprt->sc_xprt);
-               }
-
-               if (test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags)) {
-                       ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
-                       sq_cq_reap(xprt);
-               }
+               rq_cq_reap(xprt);
+               sq_cq_reap(xprt);
 
                svc_xprt_put(&xprt->sc_xprt);
                spin_lock_irqsave(&dto_lock, flags);
@@ -297,6 +282,10 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
        struct ib_wc wc;
        struct svc_rdma_op_ctxt *ctxt = NULL;
 
+       if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
+               return;
+
+       ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_rq_poll);
 
        spin_lock_bh(&xprt->sc_rq_dto_lock);
@@ -316,6 +305,15 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
 
        if (ctxt)
                atomic_inc(&rdma_stat_rq_prod);
+
+       set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+       /*
+        * If data arrived before established event,
+        * don't enqueue. This defers RPC I/O until the
+        * RDMA connection is complete.
+        */
+       if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
+               svc_xprt_enqueue(&xprt->sc_xprt);
 }
 
 /*
@@ -328,6 +326,11 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
        struct ib_cq *cq = xprt->sc_sq_cq;
        int ret;
 
+
+       if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
+               return;
+
+       ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
@@ -521,6 +524,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
        recv_wr.wr_id = (u64)(unsigned long)ctxt;
 
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
+       if (ret)
+               svc_rdma_put_context(ctxt, 1);
        return ret;
 }
 
@@ -627,6 +632,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
+                       svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
@@ -910,27 +916,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        return NULL;
 }
 
-/*
- * Post an RQ WQE to the RQ when the rqst is being released. This
- * effectively returns an RQ credit to the client. The rq_xprt_ctxt
- * will be null if the request is deferred due to an RDMA_READ or the
- * transport had no data ready (EAGAIN). Note that an RPC deferred in
- * svc_process will still return the credit, this is because the data
- * is copied and no longer consume a WQE/WC.
- */
 static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
 {
-       int err;
-       struct svcxprt_rdma *rdma =
-               container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
-       if (rqstp->rq_xprt_ctxt) {
-               BUG_ON(rqstp->rq_xprt_ctxt != rdma);
-               err = svc_rdma_post_recv(rdma);
-               if (err)
-                       dprintk("svcrdma: failed to post an RQ WQE error=%d\n",
-                               err);
-       }
-       rqstp->rq_xprt_ctxt = NULL;
 }
 
 /*
@@ -1018,7 +1005,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
        int ret;
 
        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
-               return 0;
+               return -ENOTCONN;
 
        BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
        BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
@@ -1029,13 +1016,16 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
                if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
                        spin_unlock_bh(&xprt->sc_lock);
                        atomic_inc(&rdma_stat_sq_starve);
-                       /* See if we can reap some SQ WR */
+
+                       /* See if we can opportunistically reap SQ WR to make room */
                        sq_cq_reap(xprt);
 
                        /* Wait until SQ WR available if SQ still full */
                        wait_event(xprt->sc_send_wait,
                                   atomic_read(&xprt->sc_sq_count) <
                                   xprt->sc_sq_depth);
+                       if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
+                               return 0;
                        continue;
                }
                /* Bumped used SQ WR count and post */