/*
 * Copyright (c) 2007 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rdma.h"

/*
 * XXX
 *  - build with sparse
 *  - should we limit the size of a mr region?  let transport return failure?
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
        if ((vec->addr + vec->bytes <= vec->addr) ||
            (vec->bytes > (u64)UINT_MAX))
                return 0;

        return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
                (vec->addr >> PAGE_SHIFT);
}

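/*
 * Walk the rbtree of MRs that is keyed by R_Key. If @insert is given
 * and no entry matches @key, link @insert into the tree and take a
 * reference on it; if an existing entry matches, return it unchanged.
 * Callers hold rs_rdma_lock around this walk.
 */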
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
                                       struct rds_mr *insert)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct rds_mr *mr;

        while (*p) {
                parent = *p;
                mr = rb_entry(parent, struct rds_mr, r_rb_node);

                if (key < mr->r_key)
                        p = &(*p)->rb_left;
                else if (key > mr->r_key)
                        p = &(*p)->rb_right;
                else
                        return mr;
        }

        if (insert) {
                rb_link_node(&insert->r_rb_node, parent, p);
                rb_insert_color(&insert->r_rb_node, root);
                atomic_inc(&insert->r_refcount);
        }
        return NULL;
}

/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
        struct rds_sock *rs = mr->r_sock;
        void *trans_private = NULL;
        unsigned long flags;

        rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
                 mr->r_key, atomic_read(&mr->r_refcount));

        if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
                return;

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        if (!RB_EMPTY_NODE(&mr->r_rb_node))
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
        trans_private = mr->r_trans_private;
        mr->r_trans_private = NULL;
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (trans_private)
                mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

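/*
 * Final teardown, called when the last reference to the MR is dropped
 * (see rds_mr_put()): destroy the transport-private state and free the
 * MR itself.
 */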
void __rds_put_mr_final(struct rds_mr *mr)
{
        rds_destroy_mr(mr);
        kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
        struct rds_mr *mr;
        struct rb_node *node;
        unsigned long flags;

        /* Release any MRs associated with this socket */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        while ((node = rb_first(&rs->rs_rdma_keys))) {
                mr = container_of(node, struct rds_mr, r_rb_node);
                if (mr->r_trans == rs->rs_transport)
                        mr->r_invalidate = 0;
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
                rds_destroy_mr(mr);
                rds_mr_put(mr);
                spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (rs->rs_transport && rs->rs_transport->flush_mrs)
                rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages. A short pin (fewer pages than
 * requested) is treated as failure: the pinned pages are released
 * again and -EFAULT is returned.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
                         struct page **pages, int write)
{
        int ret;

        ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

        if (ret >= 0 && ret < nr_pages) {
                while (ret--)
                        put_page(pages[ret]);
                ret = -EFAULT;
        }

        return ret;
}

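/*
 * Common MR mapping path: pin the user pages described by args->vec,
 * let the transport build an MR over them, insert the MR into the
 * socket's rbtree of R_Keys, and report the resulting <R_Key, offset>
 * cookie via *cookie_ret and/or args->cookie_addr. Used by both the
 * get_mr socket options and the RDMA_MAP cmsg path below.
 */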
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
                          u64 *cookie_ret, struct rds_mr **mr_ret)
{
        struct rds_mr *mr = NULL, *found;
        unsigned int nr_pages;
        struct page **pages = NULL;
        struct scatterlist *sg;
        void *trans_private;
        unsigned long flags;
        rds_rdma_cookie_t cookie;
        unsigned int nents;
        long i;
        int ret;

        if (rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        if (rs->rs_transport->get_mr == NULL) {
                ret = -EOPNOTSUPP;
                goto out;
        }

        nr_pages = rds_pages_in_vec(&args->vec);
        if (nr_pages == 0) {
                ret = -EINVAL;
                goto out;
        }

        rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
                 args->vec.addr, args->vec.bytes, nr_pages);

        /* XXX clamp nr_pages to limit the size of this alloc? */
        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
        if (mr == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        atomic_set(&mr->r_refcount, 1);
        RB_CLEAR_NODE(&mr->r_rb_node);
        mr->r_trans = rs->rs_transport;
        mr->r_sock = rs;

        if (args->flags & RDS_RDMA_USE_ONCE)
                mr->r_use_once = 1;
        if (args->flags & RDS_RDMA_INVALIDATE)
                mr->r_invalidate = 1;
        if (args->flags & RDS_RDMA_READWRITE)
                mr->r_write = 1;

        /*
         * Pin the pages that make up the user buffer and transfer the page
         * pointers to the mr's sg array.  We check to see if we've mapped
         * the whole region after transferring the partial page references
         * to the sg array so that we can have one page ref cleanup path.
         *
         * For now we have no flag that tells us whether the mapping is
         * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
         * the zero page.
         */
        ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
        if (ret < 0)
                goto out;

        nents = ret;
        sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
        if (sg == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        WARN_ON(!nents);
        sg_init_table(sg, nents);

        /* Stick all pages into the scatterlist */
        for (i = 0; i < nents; i++)
                sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

        rdsdebug("RDS: trans_private nents is %u\n", nents);

        /* Obtain a transport specific MR. If this succeeds, the
         * s/g list is now owned by the MR.
         * Note that dma_map() implies that pending writes are
         * flushed to RAM, so no dma_sync is needed here. */
        trans_private = rs->rs_transport->get_mr(sg, nents, rs,
                                                 &mr->r_key);

        if (IS_ERR(trans_private)) {
                for (i = 0; i < nents; i++)
                        put_page(sg_page(&sg[i]));
                kfree(sg);
                ret = PTR_ERR(trans_private);
                goto out;
        }

        mr->r_trans_private = trans_private;

        rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
                 mr->r_key, (void *)(unsigned long) args->cookie_addr);

        /* The user may pass us an unaligned address, but we can only
         * map page aligned regions. So we keep the offset, and build
         * a 64bit cookie containing <R_Key, offset> and pass that
         * around. */
        cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
        if (cookie_ret)
                *cookie_ret = cookie;

        if (args->cookie_addr &&
            put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
                ret = -EFAULT;
                goto out;
        }

        /* Inserting the new MR into the rbtree bumps its
         * reference count. */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        BUG_ON(found && found != mr);

        rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
        if (mr_ret) {
                atomic_inc(&mr->r_refcount);
                *mr_ret = mr;
        }

        ret = 0;
out:
        kfree(pages);
        if (mr)
                rds_mr_put(mr);
        return ret;
}

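/*
 * Entry point for the GET_MR setsockopt: validate and copy the
 * arguments from userspace, then hand off to __rds_rdma_map().
 */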
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_get_mr_args args;

        if (optlen != sizeof(struct rds_get_mr_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
                           sizeof(struct rds_get_mr_args)))
                return -EFAULT;

        return __rds_rdma_map(rs, &args, NULL, NULL);
}
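
/*
 * A rough userspace sketch of how this entry point is typically driven
 * (assumes the usual RDS API from <linux/rds.h>; error handling
 * omitted, not taken from this file):
 *
 *      struct rds_get_mr_args args = {
 *              .vec         = { .addr = (u64) buf, .bytes = len },
 *              .cookie_addr = (u64) &cookie,
 *              .flags       = RDS_RDMA_USE_ONCE,
 *      };
 *      setsockopt(fd, SOL_RDS, RDS_GET_MR, &args, sizeof(args));
 *
 * The returned cookie is then sent to the peer (e.g. via the
 * RDS_CMSG_RDMA_DEST cmsg) so it can RDMA to/from 'buf'.
 */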
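
/*
 * Variant of rds_get_mr() whose arguments also carry the intended
 * destination address. For now the destination is unused and the
 * request is simply translated into plain get_mr arguments.
 */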
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_get_mr_for_dest_args args;
        struct rds_get_mr_args new_args;

        if (optlen != sizeof(struct rds_get_mr_for_dest_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
                           sizeof(struct rds_get_mr_for_dest_args)))
                return -EFAULT;

        /*
         * Initially, just behave like get_mr().
         * TODO: Implement get_mr as wrapper around this
         *       and deprecate it.
         */
        new_args.vec = args.vec;
        new_args.cookie_addr = args.cookie_addr;
        new_args.flags = args.flags;

        return __rds_rdma_map(rs, &new_args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
        struct rds_free_mr_args args;
        struct rds_mr *mr;
        unsigned long flags;

        if (optlen != sizeof(struct rds_free_mr_args))
                return -EINVAL;

        if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
                           sizeof(struct rds_free_mr_args)))
                return -EFAULT;

        /* Special case - a null cookie means flush all unused MRs */
        if (args.cookie == 0) {
                if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
                        return -EINVAL;
                rs->rs_transport->flush_mrs();
                return 0;
        }

        /* Look up the MR given its R_key and remove it from the rbtree
         * so nobody else finds it.
         * This should also prevent races with rds_rdma_unuse.
         */
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
        if (mr) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                if (args.flags & RDS_RDMA_INVALIDATE)
                        mr->r_invalidate = 1;
        }
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (!mr)
                return -EINVAL;

        /*
         * Call rds_destroy_mr() ourselves so that we're sure it's done by
         * the time we return.  If we left it to rds_mr_put(), it might not
         * happen until someone else drops their ref.
         */
        rds_destroy_mr(mr);
        rds_mr_put(mr);
        return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
        struct rds_mr *mr;
        unsigned long flags;
        int zot_me = 0;

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (mr && (mr->r_use_once || force)) {
                rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
                RB_CLEAR_NODE(&mr->r_rb_node);
                zot_me = 1;
        } else if (mr)
                atomic_inc(&mr->r_refcount);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        /* May have to issue a dma_sync on this memory region.
         * Note we could avoid this if the operation was a RDMA READ,
         * but at this point we can't tell. */
        if (mr != NULL) {
                if (mr->r_trans->sync_mr)
                        mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

                /* If the MR was marked as invalidate, this will
                 * trigger an async flush. */
                if (zot_me)
                        rds_destroy_mr(mr);
                rds_mr_put(mr);
        }
}

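/*
 * Tear down an rdma_op: unpin the pages backing its s/g list, marking
 * them dirty first if the operation may have modified them (an RDMA
 * read copies remote data into local pages), then free the notifier
 * and the op itself.
 */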
void rds_rdma_free_op(struct rds_rdma_op *ro)
{
        unsigned int i;

        for (i = 0; i < ro->r_nents; i++) {
                struct page *page = sg_page(&ro->r_sg[i]);

                /* Mark page dirty if it was possibly modified, which
                 * is the case for an RDMA_READ, which copies from remote
                 * to local memory */
                if (!ro->r_write) {
                        BUG_ON(irqs_disabled());
                        set_page_dirty(page);
                }
                put_page(page);
        }

        kfree(ro->r_notifier);
        kfree(ro);
}

/*
 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
 */
static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
                                            struct rds_rdma_args *args)
{
        struct rds_iovec vec;
        struct rds_rdma_op *op = NULL;
        unsigned int nr_pages;
        unsigned int max_pages;
        unsigned int nr_bytes;
        struct page **pages = NULL;
        struct rds_iovec __user *local_vec;
        struct scatterlist *sg;
        unsigned int nr;
        unsigned int i, j;
        int ret;

        if (rs->rs_bound_addr == 0) {
                ret = -ENOTCONN; /* XXX not a great errno */
                goto out;
        }

        if (args->nr_local > (u64)UINT_MAX) {
                ret = -EMSGSIZE;
                goto out;
        }

        nr_pages = 0;
        max_pages = 0;

        local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

        /* figure out the number of pages in the vector */
        for (i = 0; i < args->nr_local; i++) {
                if (copy_from_user(&vec, &local_vec[i],
                                   sizeof(struct rds_iovec))) {
                        ret = -EFAULT;
                        goto out;
                }

                nr = rds_pages_in_vec(&vec);
                if (nr == 0) {
                        ret = -EINVAL;
                        goto out;
                }

                max_pages = max(nr, max_pages);
                nr_pages += nr;
        }

        pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
        if (op == NULL) {
                ret = -ENOMEM;
                goto out;
        }

        op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
        op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
        op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
        op->r_recverr = rs->rs_recverr;
        WARN_ON(!nr_pages);
        sg_init_table(op->r_sg, nr_pages);

        if (op->r_notify || op->r_recverr) {
                /* We allocate an uninitialized notifier here, because
                 * we don't want to do that in the completion handler. We
                 * would have to use GFP_ATOMIC there, and don't want to deal
                 * with failed allocations.
                 */
                op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
                if (!op->r_notifier) {
                        ret = -ENOMEM;
                        goto out;
                }
                op->r_notifier->n_user_token = args->user_token;
                op->r_notifier->n_status = RDS_RDMA_SUCCESS;
        }

        /* The cookie contains the R_Key of the remote memory region, and
         * optionally an offset into it. This is how we implement RDMA into
         * unaligned memory.
         * When setting up the RDMA, we need to add that offset to the
         * destination address (which is really an offset into the MR)
         * FIXME: We may want to move this into ib_rdma.c
         */
        op->r_key = rds_rdma_cookie_key(args->cookie);
        op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

        nr_bytes = 0;

        rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
                 (unsigned long long)args->nr_local,
                 (unsigned long long)args->remote_vec.addr,
                 op->r_key);

        for (i = 0; i < args->nr_local; i++) {
                if (copy_from_user(&vec, &local_vec[i],
                                   sizeof(struct rds_iovec))) {
                        ret = -EFAULT;
                        goto out;
                }

                nr = rds_pages_in_vec(&vec);
                if (nr == 0) {
                        ret = -EINVAL;
                        goto out;
                }

                rs->rs_user_addr = vec.addr;
                rs->rs_user_bytes = vec.bytes;

                /* did the user change the vec under us? */
                if (nr > max_pages || op->r_nents + nr > nr_pages) {
                        ret = -EINVAL;
                        goto out;
                }
                /* If it's a WRITE operation, we want to pin the pages for reading.
                 * If it's a READ operation, we need to pin the pages for writing.
                 */
                ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
                if (ret < 0)
                        goto out;

                rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
                         nr_bytes, nr, vec.bytes, vec.addr);

                nr_bytes += vec.bytes;

                for (j = 0; j < nr; j++) {
                        unsigned int offset = vec.addr & ~PAGE_MASK;

                        sg = &op->r_sg[op->r_nents + j];
                        sg_set_page(sg, pages[j],
                                    min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
                                    offset);

                        rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
                                 sg->offset, sg->length, vec.addr, vec.bytes);

                        vec.addr += sg->length;
                        vec.bytes -= sg->length;
                }

                op->r_nents += nr;
        }

        if (nr_bytes > args->remote_vec.bytes) {
                rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
                         nr_bytes,
                         (unsigned int) args->remote_vec.bytes);
                ret = -EINVAL;
                goto out;
        }
        op->r_bytes = nr_bytes;

        ret = 0;
out:
        kfree(pages);
        if (ret) {
                if (op)
                        rds_rdma_free_op(op);
                op = ERR_PTR(ret);
        }
        return op;
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op.
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                       struct cmsghdr *cmsg)
{
        struct rds_rdma_op *op;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
            rm->m_rdma_op != NULL)
                return -EINVAL;

        op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
        if (IS_ERR(op))
                return PTR_ERR(op);
        rds_stats_inc(s_send_rdma);
        rm->m_rdma_op = op;
        return 0;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote.
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
                       struct cmsghdr *cmsg)
{
        unsigned long flags;
        struct rds_mr *mr;
        u32 r_key;
        int err = 0;

        if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
            rm->m_rdma_cookie != 0)
                return -EINVAL;

        memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

        /* We are reusing a previously mapped MR here. Most likely, the
         * application has written to the buffer, so we need to explicitly
         * flush those writes to RAM. Otherwise the HCA may not see them
         * when doing a DMA from that buffer.
         */
        r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (mr == NULL)
                err = -EINVAL;  /* invalid r_key */
        else
                atomic_inc(&mr->r_refcount);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

        if (mr) {
                mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
                rm->m_rdma_mr = mr;
        }
        return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
                      struct cmsghdr *cmsg)
{
        if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
            rm->m_rdma_cookie != 0)
                return -EINVAL;

        return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
}