/*
 * net/9p/trans_rdma.c
 *
 * RDMA transport layer based on the trans_fd.c implementation.
 *
 * Copyright (C) 2008 by Tom Tucker <tom@opengridcomputing.com>
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#define P9_PORT			5640
#define P9_RDMA_SQ_DEPTH	32
#define P9_RDMA_RQ_DEPTH	32
#define P9_RDMA_SEND_SGE	4
#define P9_RDMA_RECV_SGE	4
#define P9_RDMA_IRD		0
#define P9_RDMA_ORD		0
#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
#define P9_RDMA_MAXSIZE		(4*4096)	/* Min SGE is 4, so with
						 * 4KB pages we can safely
						 * advertise a maxsize of
						 * 16KB (4 pages) */

#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
/**
 * struct p9_trans_rdma - RDMA transport instance
 *
 * @state: tracks the transport state machine for connection setup and tear down
 * @cm_id: The RDMA CM ID
 * @pd: Protection Domain pointer
 * @qp: Queue Pair pointer
 * @cq: Completion Queue pointer
 * @dma_mr: DMA Memory Region pointer
 * @lkey: The local access only memory region key
 * @timeout: Number of uSecs to wait for connection management events
 * @sq_depth: The depth of the Send Queue
 * @sq_sem: Semaphore for the SQ
 * @rq_depth: The depth of the Receive Queue.
 * @rq_count: Count of requests in the Receive Queue.
 * @addr: The remote peer's address
 * @req_lock: Protects the active request list
 * @cm_done: Completion event for connection management tracking
 */
struct p9_trans_rdma {
	enum {
		P9_RDMA_INIT,
		P9_RDMA_ADDR_RESOLVED,
		P9_RDMA_ROUTE_RESOLVED,
		P9_RDMA_CONNECTED,
		P9_RDMA_FLUSHING,
		P9_RDMA_CLOSING,
		P9_RDMA_CLOSED,
	} state;
	struct rdma_cm_id *cm_id;
	struct ib_pd *pd;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_mr *dma_mr;
	u32 lkey;
	long timeout;
	int sq_depth;
	struct semaphore sq_sem;
	int rq_depth;
	atomic_t rq_count;
	struct sockaddr_in addr;
	spinlock_t req_lock;

	struct completion cm_done;
};

/**
 * p9_rdma_context - Keeps track of in-process WR
 *
 * @wc_op: The original WR op for when the CQE completes in error.
 * @busa: Bus address to unmap when the WR completes
 * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
 */
struct p9_rdma_req;
struct p9_rdma_context {
	enum ib_wc_opcode wc_op;
	dma_addr_t busa;
	union {
		struct p9_req_t *req;
		struct p9_fcall *rc;
	};
};

/**
 * p9_rdma_opts - Collection of mount options
 * @port: port of connection
 * @sq_depth: The requested depth of the SQ. This really doesn't need
 * to be any deeper than the number of threads used in the client
 * @rq_depth: The depth of the RQ. Should be greater than or equal to SQ depth
 * @timeout: Time to wait in msecs for CM events
 */
struct p9_rdma_opts {
	short port;
	int sq_depth;
	int rq_depth;
	long timeout;
};

/*
 * Option Parsing (code inspired by NFS code)
 */
enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err,
};

static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_sq_depth, "sq=%u"},
	{Opt_rq_depth, "rq=%u"},
	{Opt_timeout, "timeout=%u"},
	{Opt_err, NULL},
};
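
/*
 * Example: a transport options string such as
 * "port=5640,sq=16,rq=32,timeout=30000" is split on ',' by
 * parse_opts() below and matched against the token table above.
 */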

/**
 * parse_opts - parse mount options into rdma options structure
 * @params: options string passed from mount
 * @opts: rdma transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
static int parse_opts(char *params, struct p9_rdma_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;
	opts->timeout = P9_RDMA_TIMEOUT;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
			   "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		/* Skip unrecognized options; args[] is only valid after
		 * a token with an integer argument has matched. */
		if (token == Opt_err)
			continue;
		r = match_int(&args[0], &option);
		if (r < 0) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "integer field, but no integer?\n");
			continue;
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_sq_depth:
			opts->sq_depth = option;
			break;
		case Opt_rq_depth:
			opts->rq_depth = option;
			break;
		case Opt_timeout:
			opts->timeout = option;
			break;
		default:
			continue;
		}
	}
	/* RQ must be at least as large as the SQ */
	opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
	kfree(tmp_options);
	return 0;
}

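/*
 * Connection management events drive the state machine declared in
 * struct p9_trans_rdma: P9_RDMA_INIT -> P9_RDMA_ADDR_RESOLVED ->
 * P9_RDMA_ROUTE_RESOLVED -> P9_RDMA_CONNECTED on setup, with the
 * FLUSHING/CLOSING/CLOSED states covering teardown.
 */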
static int
p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct p9_client *c = id->context;
	struct p9_trans_rdma *rdma = c->trans;
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_INIT);
		rdma->state = P9_RDMA_ADDR_RESOLVED;
		break;

	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED);
		rdma->state = P9_RDMA_ROUTE_RESOLVED;
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED);
		rdma->state = P9_RDMA_CONNECTED;
		break;

	case RDMA_CM_EVENT_DISCONNECTED:
		if (rdma)
			rdma->state = P9_RDMA_CLOSED;
		if (c)
			c->status = Disconnected;
		break;

	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		break;

	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_REQUEST:
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		c->status = Disconnected;
		rdma_disconnect(rdma->cm_id);
		break;
	default:
		BUG();
	}
	complete(&rdma->cm_done);
	return 0;
}

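/*
 * handle_recv - match an incoming reply with its outstanding request
 *
 * The 9P tag parsed from the reply header identifies the original
 * request, which is looked up with p9_tag_lookup(); on any failure
 * the transport is flushed and the client marked Disconnected.
 */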
static void
handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	struct p9_req_t *req;
	int err = 0;
	int16_t tag;

	req = NULL;
	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
			    DMA_FROM_DEVICE);

	if (status != IB_WC_SUCCESS)
		goto err_out;

	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
	if (err)
		goto err_out;

	req = p9_tag_lookup(client, tag);
	if (!req)
		goto err_out;

	req->rc = c->rc;
	req->status = REQ_STATUS_RCVD;
	p9_client_cb(client, req);

	return;

 err_out:
	P9_DPRINTK(P9_DEBUG_ERROR, "req %p err %d status %d\n",
		   req, err, status);
	rdma->state = P9_RDMA_FLUSHING;
	client->status = Disconnected;
}

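/* handle_send - unmap the request's DMA buffer once its send WR completes */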
static void
handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
{
	ib_dma_unmap_single(rdma->cm_id->device,
			    c->busa, c->req->tc->size,
			    DMA_TO_DEVICE);
}

static void qp_event_handler(struct ib_event *event, void *context)
{
	P9_DPRINTK(P9_DEBUG_ERROR, "QP event %d context %p\n", event->event,
		   context);
}

static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct p9_client *client = cq_context;
	struct p9_trans_rdma *rdma = client->trans;
	int ret;
	struct ib_wc wc;

	/* Re-arm the CQ before draining it, so a completion that arrives
	 * while we poll raises another event rather than being missed. */
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;

		switch (c->wc_op) {
		case IB_WC_RECV:
			atomic_dec(&rdma->rq_count);
			handle_recv(client, rdma, c, wc.status, wc.byte_len);
			break;

		case IB_WC_SEND:
			handle_send(client, rdma, c, wc.status, wc.byte_len);
			up(&rdma->sq_sem);
			break;

		default:
			printk(KERN_ERR "9prdma: unexpected completion type, "
			       "c->wc_op=%d, wc.opcode=%d, status=%d\n",
			       c->wc_op, wc.opcode, wc.status);
			break;
		}
		kfree(c);
	}
}

static void cq_event_handler(struct ib_event *e, void *v)
{
	P9_DPRINTK(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
}

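/*
 * rdma_destroy_trans - tear down a transport instance
 *
 * The NULL/IS_ERR checks make this safe to call on a partially
 * constructed instance, as the error path in rdma_create_trans() does.
 */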
static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
{
	if (!rdma)
		return;

	if (rdma->dma_mr && !IS_ERR(rdma->dma_mr))
		ib_dereg_mr(rdma->dma_mr);

	if (rdma->qp && !IS_ERR(rdma->qp))
		ib_destroy_qp(rdma->qp);

	if (rdma->pd && !IS_ERR(rdma->pd))
		ib_dealloc_pd(rdma->pd);

	if (rdma->cq && !IS_ERR(rdma->cq))
		ib_destroy_cq(rdma->cq);

	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
		rdma_destroy_id(rdma->cm_id);

	kfree(rdma);
}

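/*
 * post_recv - post a reply buffer on the receive queue
 *
 * Maps the fcall's data area for DMA and hands it to the HCA; one such
 * buffer must be outstanding for every request that expects a reply.
 */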
static int
post_recv(struct p9_client *client, struct p9_rdma_context *c)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge sge;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->rc->sdata, client->msize,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = client->msize;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_RECV;
	wr.wr_id = (unsigned long) c;
	wr.sg_list = &sge;
	wr.num_sge = 1;
	return ib_post_recv(rdma->qp, &wr, &bad_wr);

 error:
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
}

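/**
 * rdma_request - Transport method for issuing a 9P request
 * @client: client issuing the request
 * @req: request to be sent
 */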
static int rdma_request(struct p9_client *client, struct p9_req_t *req)
{
	struct p9_trans_rdma *rdma = client->trans;
	struct ib_send_wr wr, *bad_wr;
	struct ib_sge sge;
	int err = 0;
	unsigned long flags;
	struct p9_rdma_context *c = NULL;
	struct p9_rdma_context *rpl_context = NULL;

	/* Allocate an fcall for the reply */
	rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
	if (!rpl_context) {
		err = -ENOMEM;
		goto err_close;
	}

	/*
	 * If the request has a buffer, steal it, otherwise
	 * allocate a new one.  Typically, requests should already
	 * have receive buffers allocated and just swap them around
	 */
	if (!req->rc) {
		req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
				  GFP_KERNEL);
		if (req->rc) {
			req->rc->sdata = (char *) req->rc +
						sizeof(struct p9_fcall);
			req->rc->capacity = client->msize;
		}
	}
	rpl_context->rc = req->rc;
	if (!rpl_context->rc) {
		err = -ENOMEM;
		goto err_free2;
	}

	/*
	 * Post a receive buffer for this request. We need to ensure
	 * there is a reply buffer available for every outstanding
	 * request. A flushed request can result in no reply for an
	 * outstanding request, so we must keep a count to avoid
	 * overflowing the RQ.
	 */
	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
		err = post_recv(client, rpl_context);
		if (err)
			goto err_free1;
	} else
		atomic_dec(&rdma->rq_count);

	/* remove posted receive buffer from request structure */
	req->rc = NULL;

	/* Post the request */
	c = kmalloc(sizeof *c, GFP_KERNEL);
	if (!c) {
		err = -ENOMEM;
		goto err_free1;
	}
	c->req = req;

	c->busa = ib_dma_map_single(rdma->cm_id->device,
				    c->req->tc->sdata, c->req->tc->size,
				    DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
		goto error;

	sge.addr = c->busa;
	sge.length = c->req->tc->size;
	sge.lkey = rdma->lkey;

	wr.next = NULL;
	c->wc_op = IB_WC_SEND;
	wr.wr_id = (unsigned long) c;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.sg_list = &sge;
	wr.num_sge = 1;

	/* Wait for a send-queue credit; released by the completion handler */
	if (down_interruptible(&rdma->sq_sem))
		goto error;

	return ib_post_send(rdma->qp, &wr, &bad_wr);

 error:
	kfree(c);
	kfree(rpl_context->rc);
	kfree(rpl_context);
	P9_DPRINTK(P9_DEBUG_ERROR, "EIO\n");
	return -EIO;
 err_free1:
	kfree(rpl_context->rc);
 err_free2:
	kfree(rpl_context);
 err_close:
	spin_lock_irqsave(&rdma->req_lock, flags);
	if (rdma->state < P9_RDMA_CLOSING) {
		rdma->state = P9_RDMA_CLOSING;
		spin_unlock_irqrestore(&rdma->req_lock, flags);
		rdma_disconnect(rdma->cm_id);
	} else
		spin_unlock_irqrestore(&rdma->req_lock, flags);
	return err;
}

static void rdma_close(struct p9_client *client)
{
	struct p9_trans_rdma *rdma;

	if (!client)
		return;

	rdma = client->trans;
	if (!rdma)
		return;

	client->status = Disconnected;
	rdma_disconnect(rdma->cm_id);
	rdma_destroy_trans(rdma);
}

/**
 * alloc_rdma - Allocate and initialize the rdma transport structure
 * @opts: Mount options structure
 */
static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
{
	struct p9_trans_rdma *rdma;

	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
	if (!rdma)
		return NULL;

	rdma->sq_depth = opts->sq_depth;
	rdma->rq_depth = opts->rq_depth;
	rdma->timeout = opts->timeout;
	spin_lock_init(&rdma->req_lock);
	init_completion(&rdma->cm_done);
	/* The semaphore starts at sq_depth and acts as a send-credit counter */
	sema_init(&rdma->sq_sem, rdma->sq_depth);
	atomic_set(&rdma->rq_count, 0);

	return rdma;
}

/* It's not clear to me we can do anything after send has been posted */
static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}

/**
 * rdma_create_trans - Transport method for creating a transport instance
 * @client: client instance
 * @addr: IP address string
 * @args: Mount options string
 */
static int
rdma_create_trans(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_rdma_opts opts;
	struct p9_trans_rdma *rdma;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;

	/* Parse the transport specific mount options */
	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	/* Create and initialize the RDMA transport structure */
	rdma = alloc_rdma(&opts);
	if (!rdma)
		return -ENOMEM;

	/* Create the RDMA CM ID */
	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
	if (IS_ERR(rdma->cm_id))
		goto error;

	/* Associate the client with the transport */
	client->trans = rdma;

	/* Resolve the server's address */
	rdma->addr.sin_family = AF_INET;
	rdma->addr.sin_addr.s_addr = in_aton(addr);
	rdma->addr.sin_port = htons(opts.port);
	err = rdma_resolve_addr(rdma->cm_id, NULL,
				(struct sockaddr *)&rdma->addr,
				rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED))
		goto error;

	/* Resolve the route to the server */
	err = rdma_resolve_route(rdma->cm_id, rdma->timeout);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED))
		goto error;

	/* Query the device attributes */
	err = ib_query_device(rdma->cm_id->device, &devattr);
	if (err)
		goto error;

	/* Create the Completion Queue */
	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
				cq_event_handler, client,
				opts.sq_depth + opts.rq_depth + 1, 0);
	if (IS_ERR(rdma->cq))
		goto error;
	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);

	/* Create the Protection Domain */
	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
	if (IS_ERR(rdma->pd))
		goto error;

	/* Cache the DMA lkey in the transport */
	rdma->dma_mr = NULL;
	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		rdma->lkey = rdma->cm_id->device->local_dma_lkey;
	else {
		rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(rdma->dma_mr))
			goto error;
		rdma->lkey = rdma->dma_mr->lkey;
	}

	/* Create the Queue Pair */
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = client;
	qp_attr.cap.max_send_wr = opts.sq_depth;
	qp_attr.cap.max_recv_wr = opts.rq_depth;
	qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE;
	qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = rdma->cq;
	qp_attr.recv_cq = rdma->cq;
	err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr);
	if (err)
		goto error;
	rdma->qp = rdma->cm_id->qp;

	/* Request a connection */
	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.private_data = NULL;
	conn_param.private_data_len = 0;
	conn_param.responder_resources = P9_RDMA_IRD;
	conn_param.initiator_depth = P9_RDMA_ORD;
	err = rdma_connect(rdma->cm_id, &conn_param);
	if (err)
		goto error;
	err = wait_for_completion_interruptible(&rdma->cm_done);
	if (err || (rdma->state != P9_RDMA_CONNECTED))
		goto error;

	client->status = Connected;

	return 0;

error:
	rdma_destroy_trans(rdma);
	return -ENOTCONN;
}

static struct p9_trans_module p9_rdma_trans = {
	.name = "rdma",
	.maxsize = P9_RDMA_MAXSIZE,
	.def = 0,
	.owner = THIS_MODULE,
	.create = rdma_create_trans,
	.close = rdma_close,
	.request = rdma_request,
	.cancel = rdma_cancel,
};

/**
 * p9_trans_rdma_init - Register the 9P RDMA transport driver
 */
static int __init p9_trans_rdma_init(void)
{
	v9fs_register_trans(&p9_rdma_trans);
	return 0;
}

static void __exit p9_trans_rdma_exit(void)
{
	v9fs_unregister_trans(&p9_rdma_trans);
}

module_init(p9_trans_rdma_init);
module_exit(p9_trans_rdma_exit);

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("RDMA Transport for 9P");
MODULE_LICENSE("Dual BSD/GPL");