svcrdma: Cleanup DMA unmapping in error paths.
author    Tom Tucker <tom@ogc.us>
          Tue, 12 Oct 2010 20:33:57 +0000 (15:33 -0500)
committer J. Bruce Fields <bfields@redhat.com>
          Mon, 18 Oct 2010 23:51:32 +0000 (19:51 -0400)
There are several error paths in the code that do not unmap DMA. This
patch adds calls to svc_rdma_unmap_dma to release the DMA mappings held
by these contexts before they are freed.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
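
Every hunk below follows the same pattern: before an error path drops its
svc_rdma_op_ctxt, it first passes the context to svc_rdma_unmap_dma so that
any sge that was DMA-mapped into it is unmapped and the transport's
sc_dma_used count stays balanced. As rough orientation, a helper of that
shape might look like the sketch below; the helper name, the ctxt->xprt
back-pointer, and the loop details are illustrative assumptions, not the
kernel's exact implementation.

/*
 * Sketch of an unmap helper of the shape the error paths rely on.
 * Assumptions (not taken from this commit): the context keeps a
 * back-pointer to its transport, and ctxt->count covers exactly the
 * sges that were successfully mapped.
 */
static void sketch_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;	/* assumed back-pointer */
	int i;

	for (i = 0; i < ctxt->count; i++) {
		/* Reverse the ib_dma_map_page() done when the sge was built */
		ib_dma_unmap_page(xprt->sc_cm_id->device,
				  ctxt->sge[i].addr,
				  ctxt->sge[i].length,
				  ctxt->direction);
		/* Keep the transport's outstanding-mapping count balanced */
		atomic_dec(&xprt->sc_dma_used);
	}
}

With a helper of that shape available, each error path only needs the
svc_rdma_unmap_dma() / svc_rdma_put_context() pair added by the hunks below.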

diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 926bdb44f3de7226ce2eb78cd9f49881d58198fb..df67211c4bafa34449d7449cf221e354884bd39d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -495,6 +495,7 @@ next_sge:
                        printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
                               err);
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+                       svc_rdma_unmap_dma(ctxt);
                        svc_rdma_put_context(ctxt, 0);
                        goto out;
                }
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index d4f5e0e43f09faf83387ed717c38f108acb1dd62..249a835b703f1f0ec46c6051ff98249227876509 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -367,6 +367,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                goto err;
        return 0;
  err:
+       svc_rdma_unmap_dma(ctxt);
+       svc_rdma_put_frmr(xprt, vec->frmr);
        svc_rdma_put_context(ctxt, 0);
        /* Fatal error, close transport */
        return -EIO;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index e87e000e984c7a88c8a4395fcbe9b23ad5fbd34b..22f65cc46fe5c2ffa331d6b6dac72a5382437024 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -512,9 +512,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
+               ctxt->count = sge_no + 1;
                buflen += PAGE_SIZE;
        }
-       ctxt->count = sge_no;
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
@@ -530,6 +530,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
        return ret;
 
  err_put_ctxt:
+       svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return -ENOMEM;
 }
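
The svc_rdma_post_recv change is more than the added svc_rdma_unmap_dma call:
ctxt->count is now advanced inside the mapping loop, so if ib_dma_map_page
fails partway through, the err_put_ctxt path unmaps exactly the sges that
were mapped so far (the old code only set the count after the loop finished).
A minimal sketch of that accounting pattern follows; max_sges and
rdma_alloc_recv_page() are placeholders for this sketch, not the function's
real loop bound or page source.

/* Sketch of the per-iteration accounting used after this commit. */
static int sketch_post_recv_map(struct svcxprt_rdma *xprt,
				struct svc_rdma_op_ctxt *ctxt, int max_sges)
{
	int sge_no;

	for (sge_no = 0; sge_no < max_sges; sge_no++) {
		struct page *page = rdma_alloc_recv_page();	/* placeholder */
		u64 pa;

		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
				     PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		/* Count only what is actually mapped, one sge at a time */
		ctxt->count = sge_no + 1;
	}
	return 0;

err_put_ctxt:
	/* Undo exactly the mappings recorded in ctxt->count, then drop ctxt */
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}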
@@ -1308,7 +1309,6 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
                         enum rpcrdma_errcode err)
 {
        struct ib_send_wr err_wr;
-       struct ib_sge sge;
        struct page *p;
        struct svc_rdma_op_ctxt *ctxt;
        u32 *va;
@@ -1321,26 +1321,27 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
        /* XDR encode error */
        length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
+       ctxt = svc_rdma_get_context(xprt);
+       ctxt->direction = DMA_FROM_DEVICE;
+       ctxt->count = 1;
+       ctxt->pages[0] = p;
+
        /* Prepare SGE for local address */
-       sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-                                  p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-       if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
+       ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
+                                           p, 0, length, DMA_FROM_DEVICE);
+       if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
                put_page(p);
                return;
        }
        atomic_inc(&xprt->sc_dma_used);
-       sge.lkey = xprt->sc_dma_lkey;
-       sge.length = length;
-
-       ctxt = svc_rdma_get_context(xprt);
-       ctxt->count = 1;
-       ctxt->pages[0] = p;
+       ctxt->sge[0].lkey = xprt->sc_dma_lkey;
+       ctxt->sge[0].length = length;
 
        /* Prepare SEND WR */
        memset(&err_wr, 0, sizeof err_wr);
        ctxt->wr_op = IB_WR_SEND;
        err_wr.wr_id = (unsigned long)ctxt;
-       err_wr.sg_list = &sge;
+       err_wr.sg_list = ctxt->sge;
        err_wr.num_sge = 1;
        err_wr.opcode = IB_WR_SEND;
        err_wr.send_flags = IB_SEND_SIGNALED;
@@ -1350,9 +1351,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
        if (ret) {
                dprintk("svcrdma: Error %d posting send for protocol error\n",
                        ret);
-               ib_dma_unmap_page(xprt->sc_cm_id->device,
-                                 sge.addr, PAGE_SIZE,
-                                 DMA_FROM_DEVICE);
+               svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
        }
 }
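
One detail worth noting across these paths: svc_rdma_unmap_dma only tears
down the DMA mappings recorded in the context. Whether the context's pages
are released as well is decided, judging from the call sites in this commit,
by the second argument to svc_rdma_put_context (0 in the read/write paths,
presumably because those pages are still referenced elsewhere; 1 in the
post-recv and send-error paths, where the context owns them). The unwind
therefore always runs in the order sketched below.

	/* Error-path unwind order used throughout this commit */
	svc_rdma_unmap_dma(ctxt);	/* 1. drop the DMA mappings first */
	svc_rdma_put_context(ctxt, 1);	/* 2. then drop the context; the flag
					 *    (1 here) also frees the context's
					 *    pages, which the read/write paths
					 *    avoid by passing 0 */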