/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */

#include "rdma.h"

/*
 * XXX
 *  - build with sparse
 *  - should we limit the size of a mr region?  let transport return failure?
 *  - should we detect duplicate keys on a socket?  hmm.
 *  - an rdma is an mlock, apply rlimit?
 */

/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
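/*
 * Illustrative example (assuming 4 KB pages): addr 0x1ff0 with bytes 0x20
 * touches pages 1 and 2, so this returns
 * ((0x1ff0 + 0x20 + 0xfff) >> 12) - (0x1ff0 >> 12) = 3 - 1 = 2.
 */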
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}

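/*
 * Combined lookup/insert: returns the MR already registered under @key, or
 * NULL.  If @insert is non-NULL and the key is free, @insert is linked into
 * the tree and gains a reference.  Callers hold rs->rs_rdma_lock.
 */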
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		atomic_inc(&insert->r_refcount);
	}
	return NULL;
}

/*
 * Destroy the transport-specific part of an MR.
 */
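/*
 * The RDS_MR_DEAD bit makes this idempotent: rds_free_mr(), rds_rdma_unuse()
 * and the final rds_mr_put() may all get here, but only the first caller
 * performs the actual teardown.
 */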
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
			mr->r_key, atomic_read(&mr->r_refcount));

	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}

void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}

/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
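/*
 * Each rds_mr_put() below is expected to drop the last reference, so the
 * node is erased by rds_destroy_mr() and rb_first() advances.
 */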
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;

	/* Release any MRs associated with this socket */
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = container_of(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rds_mr_put(mr);
	}

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}

/*
 * Helper function to pin user pages.
 */
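/*
 * get_user_pages_fast() may pin fewer pages than requested; a short pin is
 * unwound and reported as -EFAULT so callers see all-or-nothing.
 */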
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}

static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (rs->rs_transport->get_mr == NULL) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (mr == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (sg == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		for (i = 0; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
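	/*
	 * Illustrative example: with r_key 0x1234 and an addr ending in
	 * 0xff0, the cookie is (0xff0ULL << 32) | 0x1234, assuming
	 * rds_rdma_make_cookie() packs the page offset into the upper
	 * 32 bits.
	 */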
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		atomic_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}

int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}

int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL);
}

/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return.  If we let rds_mr_put() do it, it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}

/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics.
 */
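/*
 * A use-once MR (or a forced unuse) is unlinked here and torn down below;
 * otherwise we only take a temporary reference for the dma_sync.
 */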
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr && (mr->r_use_once || force)) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	} else if (mr)
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was an RDMA READ,
	 * but at this point we can't tell. */
	if (mr != NULL) {
		if (mr->r_trans->sync_mr)
			mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

		/* If the MR was marked as invalidate, this will
		 * trigger an async flush. */
		if (zot_me)
			rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}

void rds_rdma_free_op(struct rds_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->r_nents; i++) {
		struct page *page = sg_page(&ro->r_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for an RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->r_write) {
			BUG_ON(irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->r_notifier);
	kfree(ro);
}

/*
 * args is a pointer to an in-kernel copy in the sendmsg cmsg.
 */
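/*
 * Two passes over the user's iovec: the first counts pages so the op and its
 * scatterlist can be sized, the second pins the pages and fills the sg
 * entries.  Userspace can rewrite the iovec between the passes, so the
 * second pass re-checks the counts and fails with -EINVAL on a mismatch.
 */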
static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
					struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_rdma_op *op = NULL;
	unsigned int nr_pages;
	unsigned int max_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec __user *local_vec;
	struct scatterlist *sg;
	unsigned int nr;
	unsigned int i, j;
	int ret;

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (args->nr_local > (u64)UINT_MAX) {
		ret = -EMSGSIZE;
		goto out;
	}

	nr_pages = 0;
	max_pages = 0;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		max_pages = max(nr, max_pages);
		nr_pages += nr;
	}

	pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		ret = -ENOMEM;
		goto out;
	}

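	/* One allocation covers the op and its trailing r_sg[] array. */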
	op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
	if (op == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->r_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	sg_init_table(op->r_sg, nr_pages);

	if (op->r_notify || op->r_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->r_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->r_notifier->n_user_token = args->user_token;
		op->r_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->r_key = rds_rdma_cookie_key(args->cookie);
	op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->r_key);

	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec))) {
			ret = -EFAULT;
			goto out;
		}

		nr = rds_pages_in_vec(&vec);
		if (nr == 0) {
			ret = -EINVAL;
			goto out;
		}

		rs->rs_user_addr = vec.addr;
		rs->rs_user_bytes = vec.bytes;

		/* did the user change the vec under us? */
		if (nr > max_pages || op->r_nents + nr > nr_pages) {
			ret = -EINVAL;
			goto out;
		}
		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
		if (ret < 0)
			goto out;

		rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
		       nr_bytes, nr, vec.bytes, vec.addr);

		nr_bytes += vec.bytes;

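		/*
		 * Split this vec into page-sized sg entries; the first and
		 * last entry may be partial, since neither vec.addr nor
		 * vec.addr + vec.bytes needs to be page aligned.
		 */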
		for (j = 0; j < nr; j++) {
			unsigned int offset = vec.addr & ~PAGE_MASK;

			sg = &op->r_sg[op->r_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
			       sg->offset, sg->length, vec.addr, vec.bytes);

			vec.addr += sg->length;
			vec.bytes -= sg->length;
		}

		op->r_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->r_bytes = nr_bytes;

	ret = 0;
out:
	kfree(pages);
	if (ret) {
		if (op)
			rds_rdma_free_op(op);
		op = ERR_PTR(ret);
	}
	return op;
}

/*
 * The application asks for an RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	struct rds_rdma_op *op;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
	    rm->m_rdma_op != NULL)
		return -EINVAL;

	op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
	if (IS_ERR(op))
		return PTR_ERR(op);
	rds_stats_inc(s_send_rdma);
	rm->m_rdma_op = op;
	return 0;
}

/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (mr == NULL)
		err = -EINVAL;	/* invalid r_key */
	else
		atomic_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->m_rdma_mr = mr;
	}
	return err;
}

/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
}
703 | } |